mirror of https://github.com/telemt/telemt.git
Phase 2 implemented with additional guards
This commit is contained in:
parent
a9f695623d
commit
6ea867ce36
|
|
@ -14,6 +14,7 @@ use crate::crypto::SecureRandom;
|
||||||
use crate::ip_tracker::UserIpTracker;
|
use crate::ip_tracker::UserIpTracker;
|
||||||
use crate::proxy::ClientHandler;
|
use crate::proxy::ClientHandler;
|
||||||
use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
|
use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
|
||||||
|
use crate::proxy::shared_state::ProxySharedState;
|
||||||
use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker};
|
use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker};
|
||||||
use crate::stats::beobachten::BeobachtenStore;
|
use crate::stats::beobachten::BeobachtenStore;
|
||||||
use crate::stats::{ReplayChecker, Stats};
|
use crate::stats::{ReplayChecker, Stats};
|
||||||
|
|
@ -49,6 +50,7 @@ pub(crate) async fn bind_listeners(
|
||||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
beobachten: Arc<BeobachtenStore>,
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
shared: Arc<ProxySharedState>,
|
||||||
max_connections: Arc<Semaphore>,
|
max_connections: Arc<Semaphore>,
|
||||||
) -> Result<BoundListeners, Box<dyn Error>> {
|
) -> Result<BoundListeners, Box<dyn Error>> {
|
||||||
startup_tracker
|
startup_tracker
|
||||||
|
|
@ -224,6 +226,7 @@ pub(crate) async fn bind_listeners(
|
||||||
let tls_cache = tls_cache.clone();
|
let tls_cache = tls_cache.clone();
|
||||||
let ip_tracker = ip_tracker.clone();
|
let ip_tracker = ip_tracker.clone();
|
||||||
let beobachten = beobachten.clone();
|
let beobachten = beobachten.clone();
|
||||||
|
let shared = shared.clone();
|
||||||
let max_connections_unix = max_connections.clone();
|
let max_connections_unix = max_connections.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
|
|
@ -284,11 +287,12 @@ pub(crate) async fn bind_listeners(
|
||||||
let tls_cache = tls_cache.clone();
|
let tls_cache = tls_cache.clone();
|
||||||
let ip_tracker = ip_tracker.clone();
|
let ip_tracker = ip_tracker.clone();
|
||||||
let beobachten = beobachten.clone();
|
let beobachten = beobachten.clone();
|
||||||
|
let shared = shared.clone();
|
||||||
let proxy_protocol_enabled = config.server.proxy_protocol;
|
let proxy_protocol_enabled = config.server.proxy_protocol;
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let _permit = permit;
|
let _permit = permit;
|
||||||
if let Err(e) = crate::proxy::client::handle_client_stream(
|
if let Err(e) = crate::proxy::client::handle_client_stream_with_shared(
|
||||||
stream,
|
stream,
|
||||||
fake_peer,
|
fake_peer,
|
||||||
config,
|
config,
|
||||||
|
|
@ -302,6 +306,7 @@ pub(crate) async fn bind_listeners(
|
||||||
tls_cache,
|
tls_cache,
|
||||||
ip_tracker,
|
ip_tracker,
|
||||||
beobachten,
|
beobachten,
|
||||||
|
shared,
|
||||||
proxy_protocol_enabled,
|
proxy_protocol_enabled,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
|
|
@ -351,6 +356,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
||||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
beobachten: Arc<BeobachtenStore>,
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
shared: Arc<ProxySharedState>,
|
||||||
max_connections: Arc<Semaphore>,
|
max_connections: Arc<Semaphore>,
|
||||||
) {
|
) {
|
||||||
for (listener, listener_proxy_protocol) in listeners {
|
for (listener, listener_proxy_protocol) in listeners {
|
||||||
|
|
@ -366,6 +372,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
||||||
let tls_cache = tls_cache.clone();
|
let tls_cache = tls_cache.clone();
|
||||||
let ip_tracker = ip_tracker.clone();
|
let ip_tracker = ip_tracker.clone();
|
||||||
let beobachten = beobachten.clone();
|
let beobachten = beobachten.clone();
|
||||||
|
let shared = shared.clone();
|
||||||
let max_connections_tcp = max_connections.clone();
|
let max_connections_tcp = max_connections.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
|
|
@ -421,13 +428,14 @@ pub(crate) fn spawn_tcp_accept_loops(
|
||||||
let tls_cache = tls_cache.clone();
|
let tls_cache = tls_cache.clone();
|
||||||
let ip_tracker = ip_tracker.clone();
|
let ip_tracker = ip_tracker.clone();
|
||||||
let beobachten = beobachten.clone();
|
let beobachten = beobachten.clone();
|
||||||
|
let shared = shared.clone();
|
||||||
let proxy_protocol_enabled = listener_proxy_protocol;
|
let proxy_protocol_enabled = listener_proxy_protocol;
|
||||||
let real_peer_report = Arc::new(std::sync::Mutex::new(None));
|
let real_peer_report = Arc::new(std::sync::Mutex::new(None));
|
||||||
let real_peer_report_for_handler = real_peer_report.clone();
|
let real_peer_report_for_handler = real_peer_report.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let _permit = permit;
|
let _permit = permit;
|
||||||
if let Err(e) = ClientHandler::new(
|
if let Err(e) = ClientHandler::new_with_shared(
|
||||||
stream,
|
stream,
|
||||||
peer_addr,
|
peer_addr,
|
||||||
config,
|
config,
|
||||||
|
|
@ -441,6 +449,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
||||||
tls_cache,
|
tls_cache,
|
||||||
ip_tracker,
|
ip_tracker,
|
||||||
beobachten,
|
beobachten,
|
||||||
|
shared,
|
||||||
proxy_protocol_enabled,
|
proxy_protocol_enabled,
|
||||||
real_peer_report_for_handler,
|
real_peer_report_for_handler,
|
||||||
)
|
)
|
||||||
|
|
|
||||||
|
|
@ -33,6 +33,7 @@ use crate::crypto::SecureRandom;
|
||||||
use crate::ip_tracker::UserIpTracker;
|
use crate::ip_tracker::UserIpTracker;
|
||||||
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
|
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
|
||||||
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||||
|
use crate::proxy::shared_state::ProxySharedState;
|
||||||
use crate::startup::{
|
use crate::startup::{
|
||||||
COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD, COMPONENT_ME_POOL_CONSTRUCT,
|
COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD, COMPONENT_ME_POOL_CONSTRUCT,
|
||||||
COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6,
|
COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6,
|
||||||
|
|
@ -631,6 +632,7 @@ async fn run_inner(
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
let _admission_tx_hold = admission_tx;
|
let _admission_tx_hold = admission_tx;
|
||||||
|
let shared_state = ProxySharedState::new();
|
||||||
|
|
||||||
let bound = listeners::bind_listeners(
|
let bound = listeners::bind_listeners(
|
||||||
&config,
|
&config,
|
||||||
|
|
@ -651,6 +653,7 @@ async fn run_inner(
|
||||||
tls_cache.clone(),
|
tls_cache.clone(),
|
||||||
ip_tracker.clone(),
|
ip_tracker.clone(),
|
||||||
beobachten.clone(),
|
beobachten.clone(),
|
||||||
|
shared_state.clone(),
|
||||||
max_connections.clone(),
|
max_connections.clone(),
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
@ -707,6 +710,7 @@ async fn run_inner(
|
||||||
tls_cache.clone(),
|
tls_cache.clone(),
|
||||||
ip_tracker.clone(),
|
ip_tracker.clone(),
|
||||||
beobachten.clone(),
|
beobachten.clone(),
|
||||||
|
shared_state,
|
||||||
max_connections.clone(),
|
max_connections.clone(),
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -81,10 +81,15 @@ use crate::transport::socket::normalize_ip;
|
||||||
use crate::transport::{UpstreamManager, configure_client_socket, parse_proxy_protocol};
|
use crate::transport::{UpstreamManager, configure_client_socket, parse_proxy_protocol};
|
||||||
|
|
||||||
use crate::proxy::direct_relay::handle_via_direct;
|
use crate::proxy::direct_relay::handle_via_direct;
|
||||||
use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle_tls_handshake};
|
use crate::proxy::handshake::{
|
||||||
|
HandshakeSuccess, handle_mtproto_handshake_with_shared, handle_tls_handshake_with_shared,
|
||||||
|
};
|
||||||
|
#[cfg(test)]
|
||||||
|
use crate::proxy::handshake::{handle_mtproto_handshake, handle_tls_handshake};
|
||||||
use crate::proxy::masking::handle_bad_client;
|
use crate::proxy::masking::handle_bad_client;
|
||||||
use crate::proxy::middle_relay::handle_via_middle_proxy;
|
use crate::proxy::middle_relay::handle_via_middle_proxy;
|
||||||
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||||
|
use crate::proxy::shared_state::ProxySharedState;
|
||||||
|
|
||||||
fn beobachten_ttl(config: &ProxyConfig) -> Duration {
|
fn beobachten_ttl(config: &ProxyConfig) -> Duration {
|
||||||
const BEOBACHTEN_TTL_MAX_MINUTES: u64 = 24 * 60;
|
const BEOBACHTEN_TTL_MAX_MINUTES: u64 = 24 * 60;
|
||||||
|
|
@ -342,7 +347,48 @@ fn synthetic_local_addr(port: u16) -> SocketAddr {
|
||||||
SocketAddr::from(([0, 0, 0, 0], port))
|
SocketAddr::from(([0, 0, 0, 0], port))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
pub async fn handle_client_stream<S>(
|
pub async fn handle_client_stream<S>(
|
||||||
|
stream: S,
|
||||||
|
peer: SocketAddr,
|
||||||
|
config: Arc<ProxyConfig>,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
replay_checker: Arc<ReplayChecker>,
|
||||||
|
buffer_pool: Arc<BufferPool>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
proxy_protocol_enabled: bool,
|
||||||
|
) -> Result<()>
|
||||||
|
where
|
||||||
|
S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
|
||||||
|
{
|
||||||
|
handle_client_stream_with_shared(
|
||||||
|
stream,
|
||||||
|
peer,
|
||||||
|
config,
|
||||||
|
stats,
|
||||||
|
upstream_manager,
|
||||||
|
replay_checker,
|
||||||
|
buffer_pool,
|
||||||
|
rng,
|
||||||
|
me_pool,
|
||||||
|
route_runtime,
|
||||||
|
tls_cache,
|
||||||
|
ip_tracker,
|
||||||
|
beobachten,
|
||||||
|
ProxySharedState::new(),
|
||||||
|
proxy_protocol_enabled,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
pub async fn handle_client_stream_with_shared<S>(
|
||||||
mut stream: S,
|
mut stream: S,
|
||||||
peer: SocketAddr,
|
peer: SocketAddr,
|
||||||
config: Arc<ProxyConfig>,
|
config: Arc<ProxyConfig>,
|
||||||
|
|
@ -356,6 +402,7 @@ pub async fn handle_client_stream<S>(
|
||||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
beobachten: Arc<BeobachtenStore>,
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
shared: Arc<ProxySharedState>,
|
||||||
proxy_protocol_enabled: bool,
|
proxy_protocol_enabled: bool,
|
||||||
) -> Result<()>
|
) -> Result<()>
|
||||||
where
|
where
|
||||||
|
|
@ -550,9 +597,10 @@ where
|
||||||
|
|
||||||
let (read_half, write_half) = tokio::io::split(stream);
|
let (read_half, write_half) = tokio::io::split(stream);
|
||||||
|
|
||||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake_with_shared(
|
||||||
&handshake, read_half, write_half, real_peer,
|
&handshake, read_half, write_half, real_peer,
|
||||||
&config, &replay_checker, &rng, tls_cache.clone(),
|
&config, &replay_checker, &rng, tls_cache.clone(),
|
||||||
|
shared.as_ref(),
|
||||||
).await {
|
).await {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
|
|
@ -578,9 +626,10 @@ where
|
||||||
let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into()
|
let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into()
|
||||||
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
||||||
|
|
||||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||||
&mtproto_handshake, tls_reader, tls_writer, real_peer,
|
&mtproto_handshake, tls_reader, tls_writer, real_peer,
|
||||||
&config, &replay_checker, true, Some(tls_user.as_str()),
|
&config, &replay_checker, true, Some(tls_user.as_str()),
|
||||||
|
shared.as_ref(),
|
||||||
).await {
|
).await {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
|
|
@ -614,11 +663,12 @@ where
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||||
RunningClientHandler::handle_authenticated_static(
|
RunningClientHandler::handle_authenticated_static_with_shared(
|
||||||
crypto_reader, crypto_writer, success,
|
crypto_reader, crypto_writer, success,
|
||||||
upstream_manager, stats, config, buffer_pool, rng, me_pool,
|
upstream_manager, stats, config, buffer_pool, rng, me_pool,
|
||||||
route_runtime.clone(),
|
route_runtime.clone(),
|
||||||
local_addr, real_peer, ip_tracker.clone(),
|
local_addr, real_peer, ip_tracker.clone(),
|
||||||
|
shared.clone(),
|
||||||
),
|
),
|
||||||
)))
|
)))
|
||||||
} else {
|
} else {
|
||||||
|
|
@ -644,9 +694,10 @@ where
|
||||||
|
|
||||||
let (read_half, write_half) = tokio::io::split(stream);
|
let (read_half, write_half) = tokio::io::split(stream);
|
||||||
|
|
||||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||||
&handshake, read_half, write_half, real_peer,
|
&handshake, read_half, write_half, real_peer,
|
||||||
&config, &replay_checker, false, None,
|
&config, &replay_checker, false, None,
|
||||||
|
shared.as_ref(),
|
||||||
).await {
|
).await {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
|
|
@ -665,7 +716,7 @@ where
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||||
RunningClientHandler::handle_authenticated_static(
|
RunningClientHandler::handle_authenticated_static_with_shared(
|
||||||
crypto_reader,
|
crypto_reader,
|
||||||
crypto_writer,
|
crypto_writer,
|
||||||
success,
|
success,
|
||||||
|
|
@ -679,6 +730,7 @@ where
|
||||||
local_addr,
|
local_addr,
|
||||||
real_peer,
|
real_peer,
|
||||||
ip_tracker.clone(),
|
ip_tracker.clone(),
|
||||||
|
shared.clone(),
|
||||||
)
|
)
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
@ -731,10 +783,12 @@ pub struct RunningClientHandler {
|
||||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
beobachten: Arc<BeobachtenStore>,
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
shared: Arc<ProxySharedState>,
|
||||||
proxy_protocol_enabled: bool,
|
proxy_protocol_enabled: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ClientHandler {
|
impl ClientHandler {
|
||||||
|
#[cfg(test)]
|
||||||
pub fn new(
|
pub fn new(
|
||||||
stream: TcpStream,
|
stream: TcpStream,
|
||||||
peer: SocketAddr,
|
peer: SocketAddr,
|
||||||
|
|
@ -751,6 +805,45 @@ impl ClientHandler {
|
||||||
beobachten: Arc<BeobachtenStore>,
|
beobachten: Arc<BeobachtenStore>,
|
||||||
proxy_protocol_enabled: bool,
|
proxy_protocol_enabled: bool,
|
||||||
real_peer_report: Arc<std::sync::Mutex<Option<SocketAddr>>>,
|
real_peer_report: Arc<std::sync::Mutex<Option<SocketAddr>>>,
|
||||||
|
) -> RunningClientHandler {
|
||||||
|
Self::new_with_shared(
|
||||||
|
stream,
|
||||||
|
peer,
|
||||||
|
config,
|
||||||
|
stats,
|
||||||
|
upstream_manager,
|
||||||
|
replay_checker,
|
||||||
|
buffer_pool,
|
||||||
|
rng,
|
||||||
|
me_pool,
|
||||||
|
route_runtime,
|
||||||
|
tls_cache,
|
||||||
|
ip_tracker,
|
||||||
|
beobachten,
|
||||||
|
ProxySharedState::new(),
|
||||||
|
proxy_protocol_enabled,
|
||||||
|
real_peer_report,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(clippy::too_many_arguments)]
|
||||||
|
pub fn new_with_shared(
|
||||||
|
stream: TcpStream,
|
||||||
|
peer: SocketAddr,
|
||||||
|
config: Arc<ProxyConfig>,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
replay_checker: Arc<ReplayChecker>,
|
||||||
|
buffer_pool: Arc<BufferPool>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
shared: Arc<ProxySharedState>,
|
||||||
|
proxy_protocol_enabled: bool,
|
||||||
|
real_peer_report: Arc<std::sync::Mutex<Option<SocketAddr>>>,
|
||||||
) -> RunningClientHandler {
|
) -> RunningClientHandler {
|
||||||
let normalized_peer = normalize_ip(peer);
|
let normalized_peer = normalize_ip(peer);
|
||||||
RunningClientHandler {
|
RunningClientHandler {
|
||||||
|
|
@ -769,6 +862,7 @@ impl ClientHandler {
|
||||||
tls_cache,
|
tls_cache,
|
||||||
ip_tracker,
|
ip_tracker,
|
||||||
beobachten,
|
beobachten,
|
||||||
|
shared,
|
||||||
proxy_protocol_enabled,
|
proxy_protocol_enabled,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -1058,7 +1152,7 @@ impl RunningClientHandler {
|
||||||
|
|
||||||
let (read_half, write_half) = self.stream.into_split();
|
let (read_half, write_half) = self.stream.into_split();
|
||||||
|
|
||||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake_with_shared(
|
||||||
&handshake,
|
&handshake,
|
||||||
read_half,
|
read_half,
|
||||||
write_half,
|
write_half,
|
||||||
|
|
@ -1067,6 +1161,7 @@ impl RunningClientHandler {
|
||||||
&replay_checker,
|
&replay_checker,
|
||||||
&self.rng,
|
&self.rng,
|
||||||
self.tls_cache.clone(),
|
self.tls_cache.clone(),
|
||||||
|
self.shared.as_ref(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
|
|
@ -1095,7 +1190,7 @@ impl RunningClientHandler {
|
||||||
.try_into()
|
.try_into()
|
||||||
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
||||||
|
|
||||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||||
&mtproto_handshake,
|
&mtproto_handshake,
|
||||||
tls_reader,
|
tls_reader,
|
||||||
tls_writer,
|
tls_writer,
|
||||||
|
|
@ -1104,6 +1199,7 @@ impl RunningClientHandler {
|
||||||
&replay_checker,
|
&replay_checker,
|
||||||
true,
|
true,
|
||||||
Some(tls_user.as_str()),
|
Some(tls_user.as_str()),
|
||||||
|
self.shared.as_ref(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
|
|
@ -1140,7 +1236,7 @@ impl RunningClientHandler {
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||||
Self::handle_authenticated_static(
|
Self::handle_authenticated_static_with_shared(
|
||||||
crypto_reader,
|
crypto_reader,
|
||||||
crypto_writer,
|
crypto_writer,
|
||||||
success,
|
success,
|
||||||
|
|
@ -1154,6 +1250,7 @@ impl RunningClientHandler {
|
||||||
local_addr,
|
local_addr,
|
||||||
peer,
|
peer,
|
||||||
self.ip_tracker,
|
self.ip_tracker,
|
||||||
|
self.shared,
|
||||||
),
|
),
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
@ -1192,7 +1289,7 @@ impl RunningClientHandler {
|
||||||
|
|
||||||
let (read_half, write_half) = self.stream.into_split();
|
let (read_half, write_half) = self.stream.into_split();
|
||||||
|
|
||||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||||
&handshake,
|
&handshake,
|
||||||
read_half,
|
read_half,
|
||||||
write_half,
|
write_half,
|
||||||
|
|
@ -1201,6 +1298,7 @@ impl RunningClientHandler {
|
||||||
&replay_checker,
|
&replay_checker,
|
||||||
false,
|
false,
|
||||||
None,
|
None,
|
||||||
|
self.shared.as_ref(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
|
|
@ -1221,7 +1319,7 @@ impl RunningClientHandler {
|
||||||
};
|
};
|
||||||
|
|
||||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||||
Self::handle_authenticated_static(
|
Self::handle_authenticated_static_with_shared(
|
||||||
crypto_reader,
|
crypto_reader,
|
||||||
crypto_writer,
|
crypto_writer,
|
||||||
success,
|
success,
|
||||||
|
|
@ -1235,6 +1333,7 @@ impl RunningClientHandler {
|
||||||
local_addr,
|
local_addr,
|
||||||
peer,
|
peer,
|
||||||
self.ip_tracker,
|
self.ip_tracker,
|
||||||
|
self.shared,
|
||||||
),
|
),
|
||||||
)))
|
)))
|
||||||
}
|
}
|
||||||
|
|
@ -1243,6 +1342,7 @@ impl RunningClientHandler {
|
||||||
/// Two modes:
|
/// Two modes:
|
||||||
/// - Direct: TCP relay to TG DC (existing behavior)
|
/// - Direct: TCP relay to TG DC (existing behavior)
|
||||||
/// - Middle Proxy: RPC multiplex through ME pool (new — supports CDN DCs)
|
/// - Middle Proxy: RPC multiplex through ME pool (new — supports CDN DCs)
|
||||||
|
#[cfg(test)]
|
||||||
async fn handle_authenticated_static<R, W>(
|
async fn handle_authenticated_static<R, W>(
|
||||||
client_reader: CryptoReader<R>,
|
client_reader: CryptoReader<R>,
|
||||||
client_writer: CryptoWriter<W>,
|
client_writer: CryptoWriter<W>,
|
||||||
|
|
@ -1258,6 +1358,45 @@ impl RunningClientHandler {
|
||||||
peer_addr: SocketAddr,
|
peer_addr: SocketAddr,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
) -> Result<()>
|
) -> Result<()>
|
||||||
|
where
|
||||||
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
|
W: AsyncWrite + Unpin + Send + 'static,
|
||||||
|
{
|
||||||
|
Self::handle_authenticated_static_with_shared(
|
||||||
|
client_reader,
|
||||||
|
client_writer,
|
||||||
|
success,
|
||||||
|
upstream_manager,
|
||||||
|
stats,
|
||||||
|
config,
|
||||||
|
buffer_pool,
|
||||||
|
rng,
|
||||||
|
me_pool,
|
||||||
|
route_runtime,
|
||||||
|
local_addr,
|
||||||
|
peer_addr,
|
||||||
|
ip_tracker,
|
||||||
|
ProxySharedState::new(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_authenticated_static_with_shared<R, W>(
|
||||||
|
client_reader: CryptoReader<R>,
|
||||||
|
client_writer: CryptoWriter<W>,
|
||||||
|
success: HandshakeSuccess,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
config: Arc<ProxyConfig>,
|
||||||
|
buffer_pool: Arc<BufferPool>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
|
local_addr: SocketAddr,
|
||||||
|
peer_addr: SocketAddr,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
_shared: Arc<ProxySharedState>,
|
||||||
|
) -> Result<()>
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin + Send + 'static,
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
W: AsyncWrite + Unpin + Send + 'static,
|
W: AsyncWrite + Unpin + Send + 'static,
|
||||||
|
|
@ -1299,6 +1438,7 @@ impl RunningClientHandler {
|
||||||
route_runtime.subscribe(),
|
route_runtime.subscribe(),
|
||||||
route_snapshot,
|
route_snapshot,
|
||||||
session_id,
|
session_id,
|
||||||
|
_shared,
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
} else {
|
} else {
|
||||||
|
|
|
||||||
|
|
@ -4,13 +4,16 @@
|
||||||
|
|
||||||
use dashmap::DashMap;
|
use dashmap::DashMap;
|
||||||
use dashmap::mapref::entry::Entry;
|
use dashmap::mapref::entry::Entry;
|
||||||
|
#[cfg(test)]
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
|
#[cfg(test)]
|
||||||
use std::collections::hash_map::RandomState;
|
use std::collections::hash_map::RandomState;
|
||||||
use std::hash::{BuildHasher, Hash, Hasher};
|
use std::hash::{BuildHasher, Hash, Hasher};
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::net::{IpAddr, Ipv6Addr};
|
use std::net::{IpAddr, Ipv6Addr};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::sync::{Mutex, OnceLock};
|
#[cfg(test)]
|
||||||
|
use std::sync::Mutex;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||||
use tracing::{debug, info, trace, warn};
|
use tracing::{debug, info, trace, warn};
|
||||||
|
|
@ -21,15 +24,15 @@ use crate::crypto::{AesCtr, SecureRandom, sha256};
|
||||||
use crate::error::{HandshakeResult, ProxyError};
|
use crate::error::{HandshakeResult, ProxyError};
|
||||||
use crate::protocol::constants::*;
|
use crate::protocol::constants::*;
|
||||||
use crate::protocol::tls;
|
use crate::protocol::tls;
|
||||||
|
use crate::proxy::shared_state::ProxySharedState;
|
||||||
use crate::stats::ReplayChecker;
|
use crate::stats::ReplayChecker;
|
||||||
use crate::stream::{CryptoReader, CryptoWriter, FakeTlsReader, FakeTlsWriter};
|
use crate::stream::{CryptoReader, CryptoWriter, FakeTlsReader, FakeTlsWriter};
|
||||||
use crate::tls_front::{TlsFrontCache, emulator};
|
use crate::tls_front::{TlsFrontCache, emulator};
|
||||||
|
#[cfg(test)]
|
||||||
use rand::RngExt;
|
use rand::RngExt;
|
||||||
|
|
||||||
const ACCESS_SECRET_BYTES: usize = 16;
|
const ACCESS_SECRET_BYTES: usize = 16;
|
||||||
static INVALID_SECRET_WARNED: OnceLock<Mutex<HashSet<(String, String)>>> = OnceLock::new();
|
|
||||||
const UNKNOWN_SNI_WARN_COOLDOWN_SECS: u64 = 5;
|
const UNKNOWN_SNI_WARN_COOLDOWN_SECS: u64 = 5;
|
||||||
static UNKNOWN_SNI_WARN_NEXT_ALLOWED: OnceLock<Mutex<Option<Instant>>> = OnceLock::new();
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
const WARNED_SECRET_MAX_ENTRIES: usize = 64;
|
const WARNED_SECRET_MAX_ENTRIES: usize = 64;
|
||||||
#[cfg(not(test))]
|
#[cfg(not(test))]
|
||||||
|
|
@ -55,48 +58,30 @@ const AUTH_PROBE_BACKOFF_MAX_MS: u64 = 16;
|
||||||
const AUTH_PROBE_BACKOFF_MAX_MS: u64 = 1_000;
|
const AUTH_PROBE_BACKOFF_MAX_MS: u64 = 1_000;
|
||||||
|
|
||||||
#[derive(Clone, Copy)]
|
#[derive(Clone, Copy)]
|
||||||
struct AuthProbeState {
|
pub(crate) struct AuthProbeState {
|
||||||
fail_streak: u32,
|
fail_streak: u32,
|
||||||
blocked_until: Instant,
|
blocked_until: Instant,
|
||||||
last_seen: Instant,
|
last_seen: Instant,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Clone, Copy)]
|
#[derive(Clone, Copy)]
|
||||||
struct AuthProbeSaturationState {
|
pub(crate) struct AuthProbeSaturationState {
|
||||||
fail_streak: u32,
|
fail_streak: u32,
|
||||||
blocked_until: Instant,
|
blocked_until: Instant,
|
||||||
last_seen: Instant,
|
last_seen: Instant,
|
||||||
}
|
}
|
||||||
|
fn unknown_sni_warn_state_lock_in(
|
||||||
static AUTH_PROBE_STATE: OnceLock<DashMap<IpAddr, AuthProbeState>> = OnceLock::new();
|
shared: &ProxySharedState,
|
||||||
static AUTH_PROBE_SATURATION_STATE: OnceLock<Mutex<Option<AuthProbeSaturationState>>> =
|
) -> std::sync::MutexGuard<'_, Option<Instant>> {
|
||||||
OnceLock::new();
|
shared
|
||||||
static AUTH_PROBE_EVICTION_HASHER: OnceLock<RandomState> = OnceLock::new();
|
.handshake
|
||||||
|
.unknown_sni_warn_next_allowed
|
||||||
fn auth_probe_state_map() -> &'static DashMap<IpAddr, AuthProbeState> {
|
|
||||||
AUTH_PROBE_STATE.get_or_init(DashMap::new)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn auth_probe_saturation_state() -> &'static Mutex<Option<AuthProbeSaturationState>> {
|
|
||||||
AUTH_PROBE_SATURATION_STATE.get_or_init(|| Mutex::new(None))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn auth_probe_saturation_state_lock()
|
|
||||||
-> std::sync::MutexGuard<'static, Option<AuthProbeSaturationState>> {
|
|
||||||
auth_probe_saturation_state()
|
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn unknown_sni_warn_state_lock() -> std::sync::MutexGuard<'static, Option<Instant>> {
|
fn should_emit_unknown_sni_warn_in(shared: &ProxySharedState, now: Instant) -> bool {
|
||||||
UNKNOWN_SNI_WARN_NEXT_ALLOWED
|
let mut guard = unknown_sni_warn_state_lock_in(shared);
|
||||||
.get_or_init(|| Mutex::new(None))
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn should_emit_unknown_sni_warn(now: Instant) -> bool {
|
|
||||||
let mut guard = unknown_sni_warn_state_lock();
|
|
||||||
if let Some(next_allowed) = *guard
|
if let Some(next_allowed) = *guard
|
||||||
&& now < next_allowed
|
&& now < next_allowed
|
||||||
{
|
{
|
||||||
|
|
@ -133,15 +118,16 @@ fn auth_probe_state_expired(state: &AuthProbeState, now: Instant) -> bool {
|
||||||
now.duration_since(state.last_seen) > retention
|
now.duration_since(state.last_seen) > retention
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_eviction_offset(peer_ip: IpAddr, now: Instant) -> usize {
|
fn auth_probe_eviction_offset_in(shared: &ProxySharedState, peer_ip: IpAddr, now: Instant) -> usize {
|
||||||
let hasher_state = AUTH_PROBE_EVICTION_HASHER.get_or_init(RandomState::new);
|
let hasher_state = &shared.handshake.auth_probe_eviction_hasher;
|
||||||
let mut hasher = hasher_state.build_hasher();
|
let mut hasher = hasher_state.build_hasher();
|
||||||
peer_ip.hash(&mut hasher);
|
peer_ip.hash(&mut hasher);
|
||||||
now.hash(&mut hasher);
|
now.hash(&mut hasher);
|
||||||
hasher.finish() as usize
|
hasher.finish() as usize
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_scan_start_offset(
|
fn auth_probe_scan_start_offset_in(
|
||||||
|
shared: &ProxySharedState,
|
||||||
peer_ip: IpAddr,
|
peer_ip: IpAddr,
|
||||||
now: Instant,
|
now: Instant,
|
||||||
state_len: usize,
|
state_len: usize,
|
||||||
|
|
@ -151,12 +137,12 @@ fn auth_probe_scan_start_offset(
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
auth_probe_eviction_offset(peer_ip, now) % state_len
|
auth_probe_eviction_offset_in(shared, peer_ip, now) % state_len
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_is_throttled(peer_ip: IpAddr, now: Instant) -> bool {
|
fn auth_probe_is_throttled_in(shared: &ProxySharedState, peer_ip: IpAddr, now: Instant) -> bool {
|
||||||
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
||||||
let state = auth_probe_state_map();
|
let state = &shared.handshake.auth_probe;
|
||||||
let Some(entry) = state.get(&peer_ip) else {
|
let Some(entry) = state.get(&peer_ip) else {
|
||||||
return false;
|
return false;
|
||||||
};
|
};
|
||||||
|
|
@ -168,9 +154,13 @@ fn auth_probe_is_throttled(peer_ip: IpAddr, now: Instant) -> bool {
|
||||||
now < entry.blocked_until
|
now < entry.blocked_until
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_saturation_grace_exhausted(peer_ip: IpAddr, now: Instant) -> bool {
|
fn auth_probe_saturation_grace_exhausted_in(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
peer_ip: IpAddr,
|
||||||
|
now: Instant,
|
||||||
|
) -> bool {
|
||||||
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
||||||
let state = auth_probe_state_map();
|
let state = &shared.handshake.auth_probe;
|
||||||
let Some(entry) = state.get(&peer_ip) else {
|
let Some(entry) = state.get(&peer_ip) else {
|
||||||
return false;
|
return false;
|
||||||
};
|
};
|
||||||
|
|
@ -183,20 +173,28 @@ fn auth_probe_saturation_grace_exhausted(peer_ip: IpAddr, now: Instant) -> bool
|
||||||
entry.fail_streak >= AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS
|
entry.fail_streak >= AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_should_apply_preauth_throttle(peer_ip: IpAddr, now: Instant) -> bool {
|
fn auth_probe_should_apply_preauth_throttle_in(
|
||||||
if !auth_probe_is_throttled(peer_ip, now) {
|
shared: &ProxySharedState,
|
||||||
|
peer_ip: IpAddr,
|
||||||
|
now: Instant,
|
||||||
|
) -> bool {
|
||||||
|
if !auth_probe_is_throttled_in(shared, peer_ip, now) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if !auth_probe_saturation_is_throttled(now) {
|
if !auth_probe_saturation_is_throttled_in(shared, now) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
auth_probe_saturation_grace_exhausted(peer_ip, now)
|
auth_probe_saturation_grace_exhausted_in(shared, peer_ip, now)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_saturation_is_throttled(now: Instant) -> bool {
|
fn auth_probe_saturation_is_throttled_in(shared: &ProxySharedState, now: Instant) -> bool {
|
||||||
let mut guard = auth_probe_saturation_state_lock();
|
let mut guard = shared
|
||||||
|
.handshake
|
||||||
|
.auth_probe_saturation
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
|
||||||
let Some(state) = guard.as_mut() else {
|
let Some(state) = guard.as_mut() else {
|
||||||
return false;
|
return false;
|
||||||
|
|
@ -214,8 +212,12 @@ fn auth_probe_saturation_is_throttled(now: Instant) -> bool {
|
||||||
false
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_note_saturation(now: Instant) {
|
fn auth_probe_note_saturation_in(shared: &ProxySharedState, now: Instant) {
|
||||||
let mut guard = auth_probe_saturation_state_lock();
|
let mut guard = shared
|
||||||
|
.handshake
|
||||||
|
.auth_probe_saturation
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
|
||||||
match guard.as_mut() {
|
match guard.as_mut() {
|
||||||
Some(state)
|
Some(state)
|
||||||
|
|
@ -237,13 +239,14 @@ fn auth_probe_note_saturation(now: Instant) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_record_failure(peer_ip: IpAddr, now: Instant) {
|
fn auth_probe_record_failure_in(shared: &ProxySharedState, peer_ip: IpAddr, now: Instant) {
|
||||||
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
||||||
let state = auth_probe_state_map();
|
let state = &shared.handshake.auth_probe;
|
||||||
auth_probe_record_failure_with_state(state, peer_ip, now);
|
auth_probe_record_failure_with_state_in(shared, state, peer_ip, now);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_record_failure_with_state(
|
fn auth_probe_record_failure_with_state_in(
|
||||||
|
shared: &ProxySharedState,
|
||||||
state: &DashMap<IpAddr, AuthProbeState>,
|
state: &DashMap<IpAddr, AuthProbeState>,
|
||||||
peer_ip: IpAddr,
|
peer_ip: IpAddr,
|
||||||
now: Instant,
|
now: Instant,
|
||||||
|
|
@ -277,7 +280,7 @@ fn auth_probe_record_failure_with_state(
|
||||||
while state.len() >= AUTH_PROBE_TRACK_MAX_ENTRIES {
|
while state.len() >= AUTH_PROBE_TRACK_MAX_ENTRIES {
|
||||||
rounds += 1;
|
rounds += 1;
|
||||||
if rounds > 8 {
|
if rounds > 8 {
|
||||||
auth_probe_note_saturation(now);
|
auth_probe_note_saturation_in(shared, now);
|
||||||
let mut eviction_candidate: Option<(IpAddr, u32, Instant)> = None;
|
let mut eviction_candidate: Option<(IpAddr, u32, Instant)> = None;
|
||||||
for entry in state.iter().take(AUTH_PROBE_PRUNE_SCAN_LIMIT) {
|
for entry in state.iter().take(AUTH_PROBE_PRUNE_SCAN_LIMIT) {
|
||||||
let key = *entry.key();
|
let key = *entry.key();
|
||||||
|
|
@ -320,7 +323,7 @@ fn auth_probe_record_failure_with_state(
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
let start_offset =
|
let start_offset =
|
||||||
auth_probe_scan_start_offset(peer_ip, now, state_len, scan_limit);
|
auth_probe_scan_start_offset_in(shared, peer_ip, now, state_len, scan_limit);
|
||||||
let mut scanned = 0usize;
|
let mut scanned = 0usize;
|
||||||
for entry in state.iter().skip(start_offset) {
|
for entry in state.iter().skip(start_offset) {
|
||||||
let key = *entry.key();
|
let key = *entry.key();
|
||||||
|
|
@ -369,11 +372,11 @@ fn auth_probe_record_failure_with_state(
|
||||||
}
|
}
|
||||||
|
|
||||||
let Some((evict_key, _, _)) = eviction_candidate else {
|
let Some((evict_key, _, _)) = eviction_candidate else {
|
||||||
auth_probe_note_saturation(now);
|
auth_probe_note_saturation_in(shared, now);
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
state.remove(&evict_key);
|
state.remove(&evict_key);
|
||||||
auth_probe_note_saturation(now);
|
auth_probe_note_saturation_in(shared, now);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -387,89 +390,58 @@ fn auth_probe_record_failure_with_state(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_record_success(peer_ip: IpAddr) {
|
fn auth_probe_record_success_in(shared: &ProxySharedState, peer_ip: IpAddr) {
|
||||||
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
||||||
let state = auth_probe_state_map();
|
let state = &shared.handshake.auth_probe;
|
||||||
state.remove(&peer_ip);
|
state.remove(&peer_ip);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn clear_auth_probe_state_for_testing() {
|
pub(crate) fn auth_probe_record_failure_for_testing(
|
||||||
if let Some(state) = AUTH_PROBE_STATE.get() {
|
shared: &ProxySharedState,
|
||||||
state.clear();
|
peer_ip: IpAddr,
|
||||||
}
|
now: Instant,
|
||||||
if AUTH_PROBE_SATURATION_STATE.get().is_some() {
|
) {
|
||||||
let mut guard = auth_probe_saturation_state_lock();
|
auth_probe_record_failure_in(shared, peer_ip, now);
|
||||||
*guard = None;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn auth_probe_fail_streak_for_testing(peer_ip: IpAddr) -> Option<u32> {
|
pub(crate) fn auth_probe_fail_streak_for_testing_in_shared(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
peer_ip: IpAddr,
|
||||||
|
) -> Option<u32> {
|
||||||
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
let peer_ip = normalize_auth_probe_ip(peer_ip);
|
||||||
let state = AUTH_PROBE_STATE.get()?;
|
shared
|
||||||
state.get(&peer_ip).map(|entry| entry.fail_streak)
|
.handshake
|
||||||
|
.auth_probe
|
||||||
|
.get(&peer_ip)
|
||||||
|
.map(|entry| entry.fail_streak)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn auth_probe_is_throttled_for_testing(peer_ip: IpAddr) -> bool {
|
pub(crate) fn clear_auth_probe_state_for_testing_in_shared(shared: &ProxySharedState) {
|
||||||
auth_probe_is_throttled(peer_ip, Instant::now())
|
shared.handshake.auth_probe.clear();
|
||||||
}
|
match shared.handshake.auth_probe_saturation.lock() {
|
||||||
|
Ok(mut saturation) => {
|
||||||
#[cfg(test)]
|
*saturation = None;
|
||||||
fn auth_probe_saturation_is_throttled_for_testing() -> bool {
|
}
|
||||||
auth_probe_saturation_is_throttled(Instant::now())
|
Err(poisoned) => {
|
||||||
}
|
let mut saturation = poisoned.into_inner();
|
||||||
|
*saturation = None;
|
||||||
#[cfg(test)]
|
shared.handshake.auth_probe_saturation.clear_poison();
|
||||||
fn auth_probe_saturation_is_throttled_at_for_testing(now: Instant) -> bool {
|
}
|
||||||
auth_probe_saturation_is_throttled(now)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
fn auth_probe_test_lock() -> &'static Mutex<()> {
|
|
||||||
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
|
||||||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
fn unknown_sni_warn_test_lock() -> &'static Mutex<()> {
|
|
||||||
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
|
||||||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
fn clear_unknown_sni_warn_state_for_testing() {
|
|
||||||
if UNKNOWN_SNI_WARN_NEXT_ALLOWED.get().is_some() {
|
|
||||||
let mut guard = unknown_sni_warn_state_lock();
|
|
||||||
*guard = None;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
fn warn_invalid_secret_once_in(
|
||||||
fn should_emit_unknown_sni_warn_for_testing(now: Instant) -> bool {
|
shared: &ProxySharedState,
|
||||||
should_emit_unknown_sni_warn(now)
|
name: &str,
|
||||||
}
|
reason: &str,
|
||||||
|
expected: usize,
|
||||||
#[cfg(test)]
|
got: Option<usize>,
|
||||||
fn clear_warned_secrets_for_testing() {
|
) {
|
||||||
if let Some(warned) = INVALID_SECRET_WARNED.get()
|
|
||||||
&& let Ok(mut guard) = warned.lock()
|
|
||||||
{
|
|
||||||
guard.clear();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
fn warned_secrets_test_lock() -> &'static Mutex<()> {
|
|
||||||
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
|
||||||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn warn_invalid_secret_once(name: &str, reason: &str, expected: usize, got: Option<usize>) {
|
|
||||||
let key = (name.to_string(), reason.to_string());
|
let key = (name.to_string(), reason.to_string());
|
||||||
let warned = INVALID_SECRET_WARNED.get_or_init(|| Mutex::new(HashSet::new()));
|
let should_warn = match shared.handshake.invalid_secret_warned.lock() {
|
||||||
let should_warn = match warned.lock() {
|
|
||||||
Ok(mut guard) => {
|
Ok(mut guard) => {
|
||||||
if !guard.contains(&key) && guard.len() >= WARNED_SECRET_MAX_ENTRIES {
|
if !guard.contains(&key) && guard.len() >= WARNED_SECRET_MAX_ENTRIES {
|
||||||
false
|
false
|
||||||
|
|
@ -502,11 +474,12 @@ fn warn_invalid_secret_once(name: &str, reason: &str, expected: usize, got: Opti
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn decode_user_secret(name: &str, secret_hex: &str) -> Option<Vec<u8>> {
|
fn decode_user_secret(shared: &ProxySharedState, name: &str, secret_hex: &str) -> Option<Vec<u8>> {
|
||||||
match hex::decode(secret_hex) {
|
match hex::decode(secret_hex) {
|
||||||
Ok(bytes) if bytes.len() == ACCESS_SECRET_BYTES => Some(bytes),
|
Ok(bytes) if bytes.len() == ACCESS_SECRET_BYTES => Some(bytes),
|
||||||
Ok(bytes) => {
|
Ok(bytes) => {
|
||||||
warn_invalid_secret_once(
|
warn_invalid_secret_once_in(
|
||||||
|
shared,
|
||||||
name,
|
name,
|
||||||
"invalid_length",
|
"invalid_length",
|
||||||
ACCESS_SECRET_BYTES,
|
ACCESS_SECRET_BYTES,
|
||||||
|
|
@ -515,7 +488,7 @@ fn decode_user_secret(name: &str, secret_hex: &str) -> Option<Vec<u8>> {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
warn_invalid_secret_once(name, "invalid_hex", ACCESS_SECRET_BYTES, None);
|
warn_invalid_secret_once_in(shared, name, "invalid_hex", ACCESS_SECRET_BYTES, None);
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -543,7 +516,8 @@ fn mode_enabled_for_proto(config: &ProxyConfig, proto_tag: ProtoTag, is_tls: boo
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn decode_user_secrets(
|
fn decode_user_secrets_in(
|
||||||
|
shared: &ProxySharedState,
|
||||||
config: &ProxyConfig,
|
config: &ProxyConfig,
|
||||||
preferred_user: Option<&str>,
|
preferred_user: Option<&str>,
|
||||||
) -> Vec<(String, Vec<u8>)> {
|
) -> Vec<(String, Vec<u8>)> {
|
||||||
|
|
@ -551,7 +525,7 @@ fn decode_user_secrets(
|
||||||
|
|
||||||
if let Some(preferred) = preferred_user
|
if let Some(preferred) = preferred_user
|
||||||
&& let Some(secret_hex) = config.access.users.get(preferred)
|
&& let Some(secret_hex) = config.access.users.get(preferred)
|
||||||
&& let Some(bytes) = decode_user_secret(preferred, secret_hex)
|
&& let Some(bytes) = decode_user_secret(shared, preferred, secret_hex)
|
||||||
{
|
{
|
||||||
secrets.push((preferred.to_string(), bytes));
|
secrets.push((preferred.to_string(), bytes));
|
||||||
}
|
}
|
||||||
|
|
@ -560,7 +534,7 @@ fn decode_user_secrets(
|
||||||
if preferred_user.is_some_and(|preferred| preferred == name.as_str()) {
|
if preferred_user.is_some_and(|preferred| preferred == name.as_str()) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if let Some(bytes) = decode_user_secret(name, secret_hex) {
|
if let Some(bytes) = decode_user_secret(shared, name, secret_hex) {
|
||||||
secrets.push((name.clone(), bytes));
|
secrets.push((name.clone(), bytes));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -568,6 +542,86 @@ fn decode_user_secrets(
|
||||||
secrets
|
secrets
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn auth_probe_state_for_testing_in_shared(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
) -> &DashMap<IpAddr, AuthProbeState> {
|
||||||
|
&shared.handshake.auth_probe
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn auth_probe_saturation_state_for_testing_in_shared(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
) -> &Mutex<Option<AuthProbeSaturationState>> {
|
||||||
|
&shared.handshake.auth_probe_saturation
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn auth_probe_saturation_state_lock_for_testing_in_shared(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
) -> std::sync::MutexGuard<'_, Option<AuthProbeSaturationState>> {
|
||||||
|
shared
|
||||||
|
.handshake
|
||||||
|
.auth_probe_saturation
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn clear_unknown_sni_warn_state_for_testing_in_shared(shared: &ProxySharedState) {
|
||||||
|
let mut guard = shared
|
||||||
|
.handshake
|
||||||
|
.unknown_sni_warn_next_allowed
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
*guard = None;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn should_emit_unknown_sni_warn_for_testing_in_shared(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
now: Instant,
|
||||||
|
) -> bool {
|
||||||
|
should_emit_unknown_sni_warn_in(shared, now)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn clear_warned_secrets_for_testing_in_shared(shared: &ProxySharedState) {
|
||||||
|
if let Ok(mut guard) = shared.handshake.invalid_secret_warned.lock() {
|
||||||
|
guard.clear();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn warned_secrets_for_testing_in_shared(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
) -> &Mutex<HashSet<(String, String)>> {
|
||||||
|
&shared.handshake.invalid_secret_warned
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn auth_probe_is_throttled_for_testing_in_shared(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
peer_ip: IpAddr,
|
||||||
|
) -> bool {
|
||||||
|
auth_probe_is_throttled_in(shared, peer_ip, Instant::now())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn auth_probe_saturation_is_throttled_for_testing_in_shared(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
) -> bool {
|
||||||
|
auth_probe_saturation_is_throttled_in(shared, Instant::now())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn auth_probe_saturation_is_throttled_at_for_testing_in_shared(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
now: Instant,
|
||||||
|
) -> bool {
|
||||||
|
auth_probe_saturation_is_throttled_in(shared, now)
|
||||||
|
}
|
||||||
|
|
||||||
#[inline]
|
#[inline]
|
||||||
fn find_matching_tls_domain<'a>(config: &'a ProxyConfig, sni: &str) -> Option<&'a str> {
|
fn find_matching_tls_domain<'a>(config: &'a ProxyConfig, sni: &str) -> Option<&'a str> {
|
||||||
if config.censorship.tls_domain.eq_ignore_ascii_case(sni) {
|
if config.censorship.tls_domain.eq_ignore_ascii_case(sni) {
|
||||||
|
|
@ -635,6 +689,7 @@ impl Drop for HandshakeSuccess {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Handle fake TLS handshake
|
/// Handle fake TLS handshake
|
||||||
|
#[cfg(test)]
|
||||||
pub async fn handle_tls_handshake<R, W>(
|
pub async fn handle_tls_handshake<R, W>(
|
||||||
handshake: &[u8],
|
handshake: &[u8],
|
||||||
reader: R,
|
reader: R,
|
||||||
|
|
@ -645,6 +700,65 @@ pub async fn handle_tls_handshake<R, W>(
|
||||||
rng: &SecureRandom,
|
rng: &SecureRandom,
|
||||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
) -> HandshakeResult<(FakeTlsReader<R>, FakeTlsWriter<W>, String), R, W>
|
) -> HandshakeResult<(FakeTlsReader<R>, FakeTlsWriter<W>, String), R, W>
|
||||||
|
where
|
||||||
|
R: AsyncRead + Unpin,
|
||||||
|
W: AsyncWrite + Unpin,
|
||||||
|
{
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
handle_tls_handshake_impl(
|
||||||
|
handshake,
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
peer,
|
||||||
|
config,
|
||||||
|
replay_checker,
|
||||||
|
rng,
|
||||||
|
tls_cache,
|
||||||
|
shared.as_ref(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_tls_handshake_with_shared<R, W>(
|
||||||
|
handshake: &[u8],
|
||||||
|
reader: R,
|
||||||
|
writer: W,
|
||||||
|
peer: SocketAddr,
|
||||||
|
config: &ProxyConfig,
|
||||||
|
replay_checker: &ReplayChecker,
|
||||||
|
rng: &SecureRandom,
|
||||||
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
) -> HandshakeResult<(FakeTlsReader<R>, FakeTlsWriter<W>, String), R, W>
|
||||||
|
where
|
||||||
|
R: AsyncRead + Unpin,
|
||||||
|
W: AsyncWrite + Unpin,
|
||||||
|
{
|
||||||
|
handle_tls_handshake_impl(
|
||||||
|
handshake,
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
peer,
|
||||||
|
config,
|
||||||
|
replay_checker,
|
||||||
|
rng,
|
||||||
|
tls_cache,
|
||||||
|
shared,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_tls_handshake_impl<R, W>(
|
||||||
|
handshake: &[u8],
|
||||||
|
reader: R,
|
||||||
|
mut writer: W,
|
||||||
|
peer: SocketAddr,
|
||||||
|
config: &ProxyConfig,
|
||||||
|
replay_checker: &ReplayChecker,
|
||||||
|
rng: &SecureRandom,
|
||||||
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
) -> HandshakeResult<(FakeTlsReader<R>, FakeTlsWriter<W>, String), R, W>
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin,
|
R: AsyncRead + Unpin,
|
||||||
W: AsyncWrite + Unpin,
|
W: AsyncWrite + Unpin,
|
||||||
|
|
@ -652,14 +766,14 @@ where
|
||||||
debug!(peer = %peer, handshake_len = handshake.len(), "Processing TLS handshake");
|
debug!(peer = %peer, handshake_len = handshake.len(), "Processing TLS handshake");
|
||||||
|
|
||||||
let throttle_now = Instant::now();
|
let throttle_now = Instant::now();
|
||||||
if auth_probe_should_apply_preauth_throttle(peer.ip(), throttle_now) {
|
if auth_probe_should_apply_preauth_throttle_in(shared, peer.ip(), throttle_now) {
|
||||||
maybe_apply_server_hello_delay(config).await;
|
maybe_apply_server_hello_delay(config).await;
|
||||||
debug!(peer = %peer, "TLS handshake rejected by pre-auth probe throttle");
|
debug!(peer = %peer, "TLS handshake rejected by pre-auth probe throttle");
|
||||||
return HandshakeResult::BadClient { reader, writer };
|
return HandshakeResult::BadClient { reader, writer };
|
||||||
}
|
}
|
||||||
|
|
||||||
if handshake.len() < tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN + 1 {
|
if handshake.len() < tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN + 1 {
|
||||||
auth_probe_record_failure(peer.ip(), Instant::now());
|
auth_probe_record_failure_in(shared, peer.ip(), Instant::now());
|
||||||
maybe_apply_server_hello_delay(config).await;
|
maybe_apply_server_hello_delay(config).await;
|
||||||
debug!(peer = %peer, "TLS handshake too short");
|
debug!(peer = %peer, "TLS handshake too short");
|
||||||
return HandshakeResult::BadClient { reader, writer };
|
return HandshakeResult::BadClient { reader, writer };
|
||||||
|
|
@ -695,11 +809,11 @@ where
|
||||||
};
|
};
|
||||||
|
|
||||||
if client_sni.is_some() && matched_tls_domain.is_none() && preferred_user_hint.is_none() {
|
if client_sni.is_some() && matched_tls_domain.is_none() && preferred_user_hint.is_none() {
|
||||||
auth_probe_record_failure(peer.ip(), Instant::now());
|
auth_probe_record_failure_in(shared, peer.ip(), Instant::now());
|
||||||
maybe_apply_server_hello_delay(config).await;
|
maybe_apply_server_hello_delay(config).await;
|
||||||
let sni = client_sni.as_deref().unwrap_or_default();
|
let sni = client_sni.as_deref().unwrap_or_default();
|
||||||
let log_now = Instant::now();
|
let log_now = Instant::now();
|
||||||
if should_emit_unknown_sni_warn(log_now) {
|
if should_emit_unknown_sni_warn_in(shared, log_now) {
|
||||||
warn!(
|
warn!(
|
||||||
peer = %peer,
|
peer = %peer,
|
||||||
sni = %sni,
|
sni = %sni,
|
||||||
|
|
@ -722,7 +836,7 @@ where
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
let secrets = decode_user_secrets(config, preferred_user_hint);
|
let secrets = decode_user_secrets_in(shared, config, preferred_user_hint);
|
||||||
|
|
||||||
let validation = match tls::validate_tls_handshake_with_replay_window(
|
let validation = match tls::validate_tls_handshake_with_replay_window(
|
||||||
handshake,
|
handshake,
|
||||||
|
|
@ -732,7 +846,7 @@ where
|
||||||
) {
|
) {
|
||||||
Some(v) => v,
|
Some(v) => v,
|
||||||
None => {
|
None => {
|
||||||
auth_probe_record_failure(peer.ip(), Instant::now());
|
auth_probe_record_failure_in(shared, peer.ip(), Instant::now());
|
||||||
maybe_apply_server_hello_delay(config).await;
|
maybe_apply_server_hello_delay(config).await;
|
||||||
debug!(
|
debug!(
|
||||||
peer = %peer,
|
peer = %peer,
|
||||||
|
|
@ -746,7 +860,7 @@ where
|
||||||
// Reject known replay digests before expensive cache/domain/ALPN policy work.
|
// Reject known replay digests before expensive cache/domain/ALPN policy work.
|
||||||
let digest_half = &validation.digest[..tls::TLS_DIGEST_HALF_LEN];
|
let digest_half = &validation.digest[..tls::TLS_DIGEST_HALF_LEN];
|
||||||
if replay_checker.check_tls_digest(digest_half) {
|
if replay_checker.check_tls_digest(digest_half) {
|
||||||
auth_probe_record_failure(peer.ip(), Instant::now());
|
auth_probe_record_failure_in(shared, peer.ip(), Instant::now());
|
||||||
maybe_apply_server_hello_delay(config).await;
|
maybe_apply_server_hello_delay(config).await;
|
||||||
warn!(peer = %peer, "TLS replay attack detected (duplicate digest)");
|
warn!(peer = %peer, "TLS replay attack detected (duplicate digest)");
|
||||||
return HandshakeResult::BadClient { reader, writer };
|
return HandshakeResult::BadClient { reader, writer };
|
||||||
|
|
@ -827,7 +941,7 @@ where
|
||||||
"TLS handshake successful"
|
"TLS handshake successful"
|
||||||
);
|
);
|
||||||
|
|
||||||
auth_probe_record_success(peer.ip());
|
auth_probe_record_success_in(shared, peer.ip());
|
||||||
|
|
||||||
HandshakeResult::Success((
|
HandshakeResult::Success((
|
||||||
FakeTlsReader::new(reader),
|
FakeTlsReader::new(reader),
|
||||||
|
|
@ -837,6 +951,7 @@ where
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Handle MTProto obfuscation handshake
|
/// Handle MTProto obfuscation handshake
|
||||||
|
#[cfg(test)]
|
||||||
pub async fn handle_mtproto_handshake<R, W>(
|
pub async fn handle_mtproto_handshake<R, W>(
|
||||||
handshake: &[u8; HANDSHAKE_LEN],
|
handshake: &[u8; HANDSHAKE_LEN],
|
||||||
reader: R,
|
reader: R,
|
||||||
|
|
@ -847,6 +962,65 @@ pub async fn handle_mtproto_handshake<R, W>(
|
||||||
is_tls: bool,
|
is_tls: bool,
|
||||||
preferred_user: Option<&str>,
|
preferred_user: Option<&str>,
|
||||||
) -> HandshakeResult<(CryptoReader<R>, CryptoWriter<W>, HandshakeSuccess), R, W>
|
) -> HandshakeResult<(CryptoReader<R>, CryptoWriter<W>, HandshakeSuccess), R, W>
|
||||||
|
where
|
||||||
|
R: AsyncRead + Unpin + Send,
|
||||||
|
W: AsyncWrite + Unpin + Send,
|
||||||
|
{
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
handle_mtproto_handshake_impl(
|
||||||
|
handshake,
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
peer,
|
||||||
|
config,
|
||||||
|
replay_checker,
|
||||||
|
is_tls,
|
||||||
|
preferred_user,
|
||||||
|
shared.as_ref(),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn handle_mtproto_handshake_with_shared<R, W>(
|
||||||
|
handshake: &[u8; HANDSHAKE_LEN],
|
||||||
|
reader: R,
|
||||||
|
writer: W,
|
||||||
|
peer: SocketAddr,
|
||||||
|
config: &ProxyConfig,
|
||||||
|
replay_checker: &ReplayChecker,
|
||||||
|
is_tls: bool,
|
||||||
|
preferred_user: Option<&str>,
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
) -> HandshakeResult<(CryptoReader<R>, CryptoWriter<W>, HandshakeSuccess), R, W>
|
||||||
|
where
|
||||||
|
R: AsyncRead + Unpin + Send,
|
||||||
|
W: AsyncWrite + Unpin + Send,
|
||||||
|
{
|
||||||
|
handle_mtproto_handshake_impl(
|
||||||
|
handshake,
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
peer,
|
||||||
|
config,
|
||||||
|
replay_checker,
|
||||||
|
is_tls,
|
||||||
|
preferred_user,
|
||||||
|
shared,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_mtproto_handshake_impl<R, W>(
|
||||||
|
handshake: &[u8; HANDSHAKE_LEN],
|
||||||
|
reader: R,
|
||||||
|
writer: W,
|
||||||
|
peer: SocketAddr,
|
||||||
|
config: &ProxyConfig,
|
||||||
|
replay_checker: &ReplayChecker,
|
||||||
|
is_tls: bool,
|
||||||
|
preferred_user: Option<&str>,
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
) -> HandshakeResult<(CryptoReader<R>, CryptoWriter<W>, HandshakeSuccess), R, W>
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin + Send,
|
R: AsyncRead + Unpin + Send,
|
||||||
W: AsyncWrite + Unpin + Send,
|
W: AsyncWrite + Unpin + Send,
|
||||||
|
|
@ -862,7 +1036,7 @@ where
|
||||||
);
|
);
|
||||||
|
|
||||||
let throttle_now = Instant::now();
|
let throttle_now = Instant::now();
|
||||||
if auth_probe_should_apply_preauth_throttle(peer.ip(), throttle_now) {
|
if auth_probe_should_apply_preauth_throttle_in(shared, peer.ip(), throttle_now) {
|
||||||
maybe_apply_server_hello_delay(config).await;
|
maybe_apply_server_hello_delay(config).await;
|
||||||
debug!(peer = %peer, "MTProto handshake rejected by pre-auth probe throttle");
|
debug!(peer = %peer, "MTProto handshake rejected by pre-auth probe throttle");
|
||||||
return HandshakeResult::BadClient { reader, writer };
|
return HandshakeResult::BadClient { reader, writer };
|
||||||
|
|
@ -872,7 +1046,7 @@ where
|
||||||
|
|
||||||
let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();
|
let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();
|
||||||
|
|
||||||
let decoded_users = decode_user_secrets(config, preferred_user);
|
let decoded_users = decode_user_secrets_in(shared, config, preferred_user);
|
||||||
|
|
||||||
for (user, secret) in decoded_users {
|
for (user, secret) in decoded_users {
|
||||||
let dec_prekey = &dec_prekey_iv[..PREKEY_LEN];
|
let dec_prekey = &dec_prekey_iv[..PREKEY_LEN];
|
||||||
|
|
@ -932,7 +1106,7 @@ where
|
||||||
// entry from the cache. We accept the cost of performing the full
|
// entry from the cache. We accept the cost of performing the full
|
||||||
// authentication check first to avoid poisoning the replay cache.
|
// authentication check first to avoid poisoning the replay cache.
|
||||||
if replay_checker.check_and_add_handshake(dec_prekey_iv) {
|
if replay_checker.check_and_add_handshake(dec_prekey_iv) {
|
||||||
auth_probe_record_failure(peer.ip(), Instant::now());
|
auth_probe_record_failure_in(shared, peer.ip(), Instant::now());
|
||||||
maybe_apply_server_hello_delay(config).await;
|
maybe_apply_server_hello_delay(config).await;
|
||||||
warn!(peer = %peer, user = %user, "MTProto replay attack detected");
|
warn!(peer = %peer, user = %user, "MTProto replay attack detected");
|
||||||
return HandshakeResult::BadClient { reader, writer };
|
return HandshakeResult::BadClient { reader, writer };
|
||||||
|
|
@ -959,7 +1133,7 @@ where
|
||||||
"MTProto handshake successful"
|
"MTProto handshake successful"
|
||||||
);
|
);
|
||||||
|
|
||||||
auth_probe_record_success(peer.ip());
|
auth_probe_record_success_in(shared, peer.ip());
|
||||||
|
|
||||||
let max_pending = config.general.crypto_pending_buffer;
|
let max_pending = config.general.crypto_pending_buffer;
|
||||||
return HandshakeResult::Success((
|
return HandshakeResult::Success((
|
||||||
|
|
@ -969,7 +1143,7 @@ where
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
auth_probe_record_failure(peer.ip(), Instant::now());
|
auth_probe_record_failure_in(shared, peer.ip(), Instant::now());
|
||||||
maybe_apply_server_hello_delay(config).await;
|
maybe_apply_server_hello_delay(config).await;
|
||||||
debug!(peer = %peer, "MTProto handshake: no matching user found");
|
debug!(peer = %peer, "MTProto handshake: no matching user found");
|
||||||
HandshakeResult::BadClient { reader, writer }
|
HandshakeResult::BadClient { reader, writer }
|
||||||
|
|
|
||||||
|
|
@ -1,14 +1,16 @@
|
||||||
use std::collections::hash_map::RandomState;
|
#[cfg(test)]
|
||||||
|
use std::collections::hash_map::DefaultHasher;
|
||||||
use std::collections::{BTreeSet, HashMap};
|
use std::collections::{BTreeSet, HashMap};
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::hash::{BuildHasher, Hash};
|
use std::hash::{BuildHasher, Hash};
|
||||||
|
#[cfg(test)]
|
||||||
|
use std::hash::Hasher;
|
||||||
use std::net::{IpAddr, SocketAddr};
|
use std::net::{IpAddr, SocketAddr};
|
||||||
use std::sync::atomic::{AtomicU64, Ordering};
|
use std::sync::atomic::{AtomicU64, Ordering};
|
||||||
use std::sync::{Arc, Mutex, OnceLock};
|
use std::sync::Arc;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
use dashmap::DashMap;
|
|
||||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||||
use tokio::sync::{mpsc, oneshot, watch};
|
use tokio::sync::{mpsc, oneshot, watch};
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
|
|
@ -19,6 +21,7 @@ use crate::crypto::SecureRandom;
|
||||||
use crate::error::{ProxyError, Result};
|
use crate::error::{ProxyError, Result};
|
||||||
use crate::protocol::constants::{secure_padding_len, *};
|
use crate::protocol::constants::{secure_padding_len, *};
|
||||||
use crate::proxy::handshake::HandshakeSuccess;
|
use crate::proxy::handshake::HandshakeSuccess;
|
||||||
|
use crate::proxy::shared_state::ProxySharedState;
|
||||||
use crate::proxy::route_mode::{
|
use crate::proxy::route_mode::{
|
||||||
ROUTE_SWITCH_ERROR_MSG, RelayRouteMode, RouteCutoverState, affected_cutover_state,
|
ROUTE_SWITCH_ERROR_MSG, RelayRouteMode, RouteCutoverState, affected_cutover_state,
|
||||||
cutover_stagger_delay,
|
cutover_stagger_delay,
|
||||||
|
|
@ -51,19 +54,9 @@ const ME_D2C_FLUSH_BATCH_MAX_BYTES_MIN: usize = 4096;
|
||||||
const ME_D2C_FRAME_BUF_SHRINK_HYSTERESIS_FACTOR: usize = 2;
|
const ME_D2C_FRAME_BUF_SHRINK_HYSTERESIS_FACTOR: usize = 2;
|
||||||
const ME_D2C_SINGLE_WRITE_COALESCE_MAX_BYTES: usize = 128 * 1024;
|
const ME_D2C_SINGLE_WRITE_COALESCE_MAX_BYTES: usize = 128 * 1024;
|
||||||
const QUOTA_RESERVE_SPIN_RETRIES: usize = 32;
|
const QUOTA_RESERVE_SPIN_RETRIES: usize = 32;
|
||||||
static DESYNC_DEDUP: OnceLock<DashMap<u64, Instant>> = OnceLock::new();
|
|
||||||
static DESYNC_DEDUP_PREVIOUS: OnceLock<DashMap<u64, Instant>> = OnceLock::new();
|
|
||||||
static DESYNC_HASHER: OnceLock<RandomState> = OnceLock::new();
|
|
||||||
static DESYNC_FULL_CACHE_LAST_EMIT_AT: OnceLock<Mutex<Option<Instant>>> = OnceLock::new();
|
|
||||||
static DESYNC_DEDUP_ROTATION_STATE: OnceLock<Mutex<DesyncDedupRotationState>> = OnceLock::new();
|
|
||||||
// Invariant for async callers:
|
|
||||||
// this std::sync::Mutex is allowed only because critical sections are short,
|
|
||||||
// synchronous, and MUST never cross an `.await`.
|
|
||||||
static RELAY_IDLE_CANDIDATE_REGISTRY: OnceLock<Mutex<RelayIdleCandidateRegistry>> = OnceLock::new();
|
|
||||||
static RELAY_IDLE_MARK_SEQ: AtomicU64 = AtomicU64::new(0);
|
|
||||||
|
|
||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
struct DesyncDedupRotationState {
|
pub(crate) struct DesyncDedupRotationState {
|
||||||
current_started_at: Option<Instant>,
|
current_started_at: Option<Instant>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -80,7 +73,7 @@ struct RelayForensicsState {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
struct RelayIdleCandidateRegistry {
|
pub(crate) struct RelayIdleCandidateRegistry {
|
||||||
by_conn_id: HashMap<u64, RelayIdleCandidateMeta>,
|
by_conn_id: HashMap<u64, RelayIdleCandidateMeta>,
|
||||||
ordered: BTreeSet<(u64, u64)>,
|
ordered: BTreeSet<(u64, u64)>,
|
||||||
pressure_event_seq: u64,
|
pressure_event_seq: u64,
|
||||||
|
|
@ -93,20 +86,14 @@ struct RelayIdleCandidateMeta {
|
||||||
mark_pressure_seq: u64,
|
mark_pressure_seq: u64,
|
||||||
}
|
}
|
||||||
|
|
||||||
fn relay_idle_candidate_registry() -> &'static Mutex<RelayIdleCandidateRegistry> {
|
fn relay_idle_candidate_registry_lock_in(
|
||||||
RELAY_IDLE_CANDIDATE_REGISTRY.get_or_init(|| Mutex::new(RelayIdleCandidateRegistry::default()))
|
shared: &ProxySharedState,
|
||||||
}
|
) -> std::sync::MutexGuard<'_, RelayIdleCandidateRegistry> {
|
||||||
|
let registry = &shared.middle_relay.relay_idle_registry;
|
||||||
fn relay_idle_candidate_registry_lock() -> std::sync::MutexGuard<'static, RelayIdleCandidateRegistry>
|
|
||||||
{
|
|
||||||
// Keep lock scope narrow and synchronous: callers must drop guard before any `.await`.
|
|
||||||
let registry = relay_idle_candidate_registry();
|
|
||||||
match registry.lock() {
|
match registry.lock() {
|
||||||
Ok(guard) => guard,
|
Ok(guard) => guard,
|
||||||
Err(poisoned) => {
|
Err(poisoned) => {
|
||||||
let mut guard = poisoned.into_inner();
|
let mut guard = poisoned.into_inner();
|
||||||
// Fail closed after panic while holding registry lock: drop all
|
|
||||||
// candidates and pressure cursors to avoid stale cross-session state.
|
|
||||||
*guard = RelayIdleCandidateRegistry::default();
|
*guard = RelayIdleCandidateRegistry::default();
|
||||||
registry.clear_poison();
|
registry.clear_poison();
|
||||||
guard
|
guard
|
||||||
|
|
@ -114,14 +101,16 @@ fn relay_idle_candidate_registry_lock() -> std::sync::MutexGuard<'static, RelayI
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn mark_relay_idle_candidate(conn_id: u64) -> bool {
|
fn mark_relay_idle_candidate_in(shared: &ProxySharedState, conn_id: u64) -> bool {
|
||||||
let mut guard = relay_idle_candidate_registry_lock();
|
let mut guard = relay_idle_candidate_registry_lock_in(shared);
|
||||||
|
|
||||||
if guard.by_conn_id.contains_key(&conn_id) {
|
if guard.by_conn_id.contains_key(&conn_id) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
let mark_order_seq = RELAY_IDLE_MARK_SEQ
|
let mark_order_seq = shared
|
||||||
|
.middle_relay
|
||||||
|
.relay_idle_mark_seq
|
||||||
.fetch_add(1, Ordering::Relaxed)
|
.fetch_add(1, Ordering::Relaxed)
|
||||||
.saturating_add(1);
|
.saturating_add(1);
|
||||||
let meta = RelayIdleCandidateMeta {
|
let meta = RelayIdleCandidateMeta {
|
||||||
|
|
@ -133,36 +122,31 @@ fn mark_relay_idle_candidate(conn_id: u64) -> bool {
|
||||||
true
|
true
|
||||||
}
|
}
|
||||||
|
|
||||||
fn clear_relay_idle_candidate(conn_id: u64) {
|
fn clear_relay_idle_candidate_in(shared: &ProxySharedState, conn_id: u64) {
|
||||||
let mut guard = relay_idle_candidate_registry_lock();
|
let mut guard = relay_idle_candidate_registry_lock_in(shared);
|
||||||
|
|
||||||
if let Some(meta) = guard.by_conn_id.remove(&conn_id) {
|
if let Some(meta) = guard.by_conn_id.remove(&conn_id) {
|
||||||
guard.ordered.remove(&(meta.mark_order_seq, conn_id));
|
guard.ordered.remove(&(meta.mark_order_seq, conn_id));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
fn note_relay_pressure_event_in(shared: &ProxySharedState) {
|
||||||
fn oldest_relay_idle_candidate() -> Option<u64> {
|
let mut guard = relay_idle_candidate_registry_lock_in(shared);
|
||||||
let guard = relay_idle_candidate_registry_lock();
|
|
||||||
guard.ordered.iter().next().map(|(_, conn_id)| *conn_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn note_relay_pressure_event() {
|
|
||||||
let mut guard = relay_idle_candidate_registry_lock();
|
|
||||||
guard.pressure_event_seq = guard.pressure_event_seq.wrapping_add(1);
|
guard.pressure_event_seq = guard.pressure_event_seq.wrapping_add(1);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn relay_pressure_event_seq() -> u64 {
|
fn relay_pressure_event_seq_in(shared: &ProxySharedState) -> u64 {
|
||||||
let guard = relay_idle_candidate_registry_lock();
|
let guard = relay_idle_candidate_registry_lock_in(shared);
|
||||||
guard.pressure_event_seq
|
guard.pressure_event_seq
|
||||||
}
|
}
|
||||||
|
|
||||||
fn maybe_evict_idle_candidate_on_pressure(
|
fn maybe_evict_idle_candidate_on_pressure_in(
|
||||||
|
shared: &ProxySharedState,
|
||||||
conn_id: u64,
|
conn_id: u64,
|
||||||
seen_pressure_seq: &mut u64,
|
seen_pressure_seq: &mut u64,
|
||||||
stats: &Stats,
|
stats: &Stats,
|
||||||
) -> bool {
|
) -> bool {
|
||||||
let mut guard = relay_idle_candidate_registry_lock();
|
let mut guard = relay_idle_candidate_registry_lock_in(shared);
|
||||||
|
|
||||||
let latest_pressure_seq = guard.pressure_event_seq;
|
let latest_pressure_seq = guard.pressure_event_seq;
|
||||||
if latest_pressure_seq == *seen_pressure_seq {
|
if latest_pressure_seq == *seen_pressure_seq {
|
||||||
|
|
@ -192,7 +176,6 @@ fn maybe_evict_idle_candidate_on_pressure(
|
||||||
return false;
|
return false;
|
||||||
};
|
};
|
||||||
|
|
||||||
// Pressure events that happened before candidate soft-mark are stale for this candidate.
|
|
||||||
if latest_pressure_seq == candidate_meta.mark_pressure_seq {
|
if latest_pressure_seq == candidate_meta.mark_pressure_seq {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
@ -205,15 +188,6 @@ fn maybe_evict_idle_candidate_on_pressure(
|
||||||
true
|
true
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
fn clear_relay_idle_pressure_state_for_testing() {
|
|
||||||
if RELAY_IDLE_CANDIDATE_REGISTRY.get().is_some() {
|
|
||||||
let mut guard = relay_idle_candidate_registry_lock();
|
|
||||||
*guard = RelayIdleCandidateRegistry::default();
|
|
||||||
}
|
|
||||||
RELAY_IDLE_MARK_SEQ.store(0, Ordering::Relaxed);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Copy)]
|
#[derive(Clone, Copy)]
|
||||||
struct MeD2cFlushPolicy {
|
struct MeD2cFlushPolicy {
|
||||||
max_frames: usize,
|
max_frames: usize,
|
||||||
|
|
@ -235,31 +209,41 @@ struct RelayClientIdlePolicy {
|
||||||
|
|
||||||
impl RelayClientIdlePolicy {
|
impl RelayClientIdlePolicy {
|
||||||
fn from_config(config: &ProxyConfig) -> Self {
|
fn from_config(config: &ProxyConfig) -> Self {
|
||||||
Self {
|
let frame_read_timeout =
|
||||||
enabled: config.timeouts.relay_idle_policy_v2_enabled,
|
Duration::from_secs(config.timeouts.relay_client_idle_hard_secs.max(1));
|
||||||
soft_idle: Duration::from_secs(config.timeouts.relay_client_idle_soft_secs.max(1)),
|
if !config.timeouts.relay_idle_policy_v2_enabled {
|
||||||
hard_idle: Duration::from_secs(config.timeouts.relay_client_idle_hard_secs.max(1)),
|
return Self::disabled(frame_read_timeout);
|
||||||
grace_after_downstream_activity: Duration::from_secs(
|
}
|
||||||
|
|
||||||
|
let soft_idle = Duration::from_secs(config.timeouts.relay_client_idle_soft_secs.max(1));
|
||||||
|
let hard_idle = Duration::from_secs(config.timeouts.relay_client_idle_hard_secs.max(1));
|
||||||
|
let grace_after_downstream_activity = Duration::from_secs(
|
||||||
config
|
config
|
||||||
.timeouts
|
.timeouts
|
||||||
.relay_idle_grace_after_downstream_activity_secs,
|
.relay_idle_grace_after_downstream_activity_secs,
|
||||||
),
|
);
|
||||||
legacy_frame_read_timeout: Duration::from_secs(config.timeouts.client_handshake.max(1)),
|
|
||||||
|
Self {
|
||||||
|
enabled: true,
|
||||||
|
soft_idle,
|
||||||
|
hard_idle,
|
||||||
|
grace_after_downstream_activity,
|
||||||
|
legacy_frame_read_timeout: frame_read_timeout,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
fn disabled(frame_read_timeout: Duration) -> Self {
|
fn disabled(frame_read_timeout: Duration) -> Self {
|
||||||
Self {
|
Self {
|
||||||
enabled: false,
|
enabled: false,
|
||||||
soft_idle: Duration::from_secs(0),
|
soft_idle: frame_read_timeout,
|
||||||
hard_idle: Duration::from_secs(0),
|
hard_idle: frame_read_timeout,
|
||||||
grace_after_downstream_activity: Duration::from_secs(0),
|
grace_after_downstream_activity: Duration::ZERO,
|
||||||
legacy_frame_read_timeout: frame_read_timeout,
|
legacy_frame_read_timeout: frame_read_timeout,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
struct RelayClientIdleState {
|
struct RelayClientIdleState {
|
||||||
last_client_frame_at: Instant,
|
last_client_frame_at: Instant,
|
||||||
soft_idle_marked: bool,
|
soft_idle_marked: bool,
|
||||||
|
|
@ -303,24 +287,39 @@ impl MeD2cFlushPolicy {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
fn hash_value<T: Hash>(value: &T) -> u64 {
|
fn hash_value<T: Hash>(value: &T) -> u64 {
|
||||||
let state = DESYNC_HASHER.get_or_init(RandomState::new);
|
let mut hasher = DefaultHasher::new();
|
||||||
state.hash_one(value)
|
value.hash(&mut hasher);
|
||||||
|
hasher.finish()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn hash_value_in<T: Hash>(shared: &ProxySharedState, value: &T) -> u64 {
|
||||||
|
shared.middle_relay.desync_hasher.hash_one(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
fn hash_ip(ip: IpAddr) -> u64 {
|
fn hash_ip(ip: IpAddr) -> u64 {
|
||||||
hash_value(&ip)
|
hash_value(&ip)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool {
|
fn hash_ip_in(shared: &ProxySharedState, ip: IpAddr) -> u64 {
|
||||||
|
hash_value_in(shared, &ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn should_emit_full_desync_in(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
key: u64,
|
||||||
|
all_full: bool,
|
||||||
|
now: Instant,
|
||||||
|
) -> bool {
|
||||||
if all_full {
|
if all_full {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
let dedup_current = DESYNC_DEDUP.get_or_init(DashMap::new);
|
let dedup_current = &shared.middle_relay.desync_dedup;
|
||||||
let dedup_previous = DESYNC_DEDUP_PREVIOUS.get_or_init(DashMap::new);
|
let dedup_previous = &shared.middle_relay.desync_dedup_previous;
|
||||||
let rotation_state =
|
let rotation_state = &shared.middle_relay.desync_dedup_rotation_state;
|
||||||
DESYNC_DEDUP_ROTATION_STATE.get_or_init(|| Mutex::new(DesyncDedupRotationState::default()));
|
|
||||||
|
|
||||||
let mut state = match rotation_state.lock() {
|
let mut state = match rotation_state.lock() {
|
||||||
Ok(guard) => guard,
|
Ok(guard) => guard,
|
||||||
|
|
@ -366,8 +365,6 @@ fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool {
|
||||||
None => true,
|
None => true,
|
||||||
};
|
};
|
||||||
if within_window {
|
if within_window {
|
||||||
// Keep the original timestamp when promoting from previous bucket,
|
|
||||||
// so dedup expiry remains tied to first-seen time.
|
|
||||||
dedup_current.insert(key, seen_at);
|
dedup_current.insert(key, seen_at);
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
@ -375,8 +372,6 @@ fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool {
|
||||||
}
|
}
|
||||||
|
|
||||||
if dedup_current.len() >= DESYNC_DEDUP_MAX_ENTRIES {
|
if dedup_current.len() >= DESYNC_DEDUP_MAX_ENTRIES {
|
||||||
// Bounded eviction path: rotate buckets instead of scanning/evicting
|
|
||||||
// arbitrary entries from a saturated single map.
|
|
||||||
dedup_previous.clear();
|
dedup_previous.clear();
|
||||||
for entry in dedup_current.iter() {
|
for entry in dedup_current.iter() {
|
||||||
dedup_previous.insert(*entry.key(), *entry.value());
|
dedup_previous.insert(*entry.key(), *entry.value());
|
||||||
|
|
@ -384,15 +379,15 @@ fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool {
|
||||||
dedup_current.clear();
|
dedup_current.clear();
|
||||||
state.current_started_at = Some(now);
|
state.current_started_at = Some(now);
|
||||||
dedup_current.insert(key, now);
|
dedup_current.insert(key, now);
|
||||||
should_emit_full_desync_full_cache(now)
|
should_emit_full_desync_full_cache_in(shared, now)
|
||||||
} else {
|
} else {
|
||||||
dedup_current.insert(key, now);
|
dedup_current.insert(key, now);
|
||||||
true
|
true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn should_emit_full_desync_full_cache(now: Instant) -> bool {
|
fn should_emit_full_desync_full_cache_in(shared: &ProxySharedState, now: Instant) -> bool {
|
||||||
let gate = DESYNC_FULL_CACHE_LAST_EMIT_AT.get_or_init(|| Mutex::new(None));
|
let gate = &shared.middle_relay.desync_full_cache_last_emit_at;
|
||||||
let Ok(mut last_emit_at) = gate.lock() else {
|
let Ok(mut last_emit_at) = gate.lock() else {
|
||||||
return false;
|
return false;
|
||||||
};
|
};
|
||||||
|
|
@ -417,46 +412,6 @@ fn should_emit_full_desync_full_cache(now: Instant) -> bool {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
fn clear_desync_dedup_for_testing() {
|
|
||||||
if let Some(dedup) = DESYNC_DEDUP.get() {
|
|
||||||
dedup.clear();
|
|
||||||
}
|
|
||||||
if let Some(dedup_previous) = DESYNC_DEDUP_PREVIOUS.get() {
|
|
||||||
dedup_previous.clear();
|
|
||||||
}
|
|
||||||
if let Some(rotation_state) = DESYNC_DEDUP_ROTATION_STATE.get() {
|
|
||||||
match rotation_state.lock() {
|
|
||||||
Ok(mut guard) => {
|
|
||||||
*guard = DesyncDedupRotationState::default();
|
|
||||||
}
|
|
||||||
Err(poisoned) => {
|
|
||||||
let mut guard = poisoned.into_inner();
|
|
||||||
*guard = DesyncDedupRotationState::default();
|
|
||||||
rotation_state.clear_poison();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if let Some(last_emit_at) = DESYNC_FULL_CACHE_LAST_EMIT_AT.get() {
|
|
||||||
match last_emit_at.lock() {
|
|
||||||
Ok(mut guard) => {
|
|
||||||
*guard = None;
|
|
||||||
}
|
|
||||||
Err(poisoned) => {
|
|
||||||
let mut guard = poisoned.into_inner();
|
|
||||||
*guard = None;
|
|
||||||
last_emit_at.clear_poison();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
fn desync_dedup_test_lock() -> &'static Mutex<()> {
|
|
||||||
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
|
||||||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn desync_forensics_len_bytes(len: usize) -> ([u8; 4], bool) {
|
fn desync_forensics_len_bytes(len: usize) -> ([u8; 4], bool) {
|
||||||
match u32::try_from(len) {
|
match u32::try_from(len) {
|
||||||
Ok(value) => (value.to_le_bytes(), false),
|
Ok(value) => (value.to_le_bytes(), false),
|
||||||
|
|
@ -464,7 +419,8 @@ fn desync_forensics_len_bytes(len: usize) -> ([u8; 4], bool) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn report_desync_frame_too_large(
|
fn report_desync_frame_too_large_in(
|
||||||
|
shared: &ProxySharedState,
|
||||||
state: &RelayForensicsState,
|
state: &RelayForensicsState,
|
||||||
proto_tag: ProtoTag,
|
proto_tag: ProtoTag,
|
||||||
frame_counter: u64,
|
frame_counter: u64,
|
||||||
|
|
@ -482,13 +438,13 @@ fn report_desync_frame_too_large(
|
||||||
.map(|b| matches!(b[0], b'G' | b'P' | b'H' | b'C' | b'D'))
|
.map(|b| matches!(b[0], b'G' | b'P' | b'H' | b'C' | b'D'))
|
||||||
.unwrap_or(false);
|
.unwrap_or(false);
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
let dedup_key = hash_value(&(
|
let dedup_key = hash_value_in(shared, &(
|
||||||
state.user.as_str(),
|
state.user.as_str(),
|
||||||
state.peer_hash,
|
state.peer_hash,
|
||||||
proto_tag,
|
proto_tag,
|
||||||
DESYNC_ERROR_CLASS,
|
DESYNC_ERROR_CLASS,
|
||||||
));
|
));
|
||||||
let emit_full = should_emit_full_desync(dedup_key, state.desync_all_full, now);
|
let emit_full = should_emit_full_desync_in(shared, dedup_key, state.desync_all_full, now);
|
||||||
let duration_ms = state.started_at.elapsed().as_millis() as u64;
|
let duration_ms = state.started_at.elapsed().as_millis() as u64;
|
||||||
let bytes_me2c = state.bytes_me2c.load(Ordering::Relaxed);
|
let bytes_me2c = state.bytes_me2c.load(Ordering::Relaxed);
|
||||||
|
|
||||||
|
|
@ -557,6 +513,29 @@ fn report_desync_frame_too_large(
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
fn report_desync_frame_too_large(
|
||||||
|
state: &RelayForensicsState,
|
||||||
|
proto_tag: ProtoTag,
|
||||||
|
frame_counter: u64,
|
||||||
|
max_frame: usize,
|
||||||
|
len: usize,
|
||||||
|
raw_len_bytes: Option<[u8; 4]>,
|
||||||
|
stats: &Stats,
|
||||||
|
) -> ProxyError {
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
report_desync_frame_too_large_in(
|
||||||
|
shared.as_ref(),
|
||||||
|
state,
|
||||||
|
proto_tag,
|
||||||
|
frame_counter,
|
||||||
|
max_frame,
|
||||||
|
len,
|
||||||
|
raw_len_bytes,
|
||||||
|
stats,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
fn should_yield_c2me_sender(sent_since_yield: usize, has_backlog: bool) -> bool {
|
fn should_yield_c2me_sender(sent_since_yield: usize, has_backlog: bool) -> bool {
|
||||||
has_backlog && sent_since_yield >= C2ME_SENDER_FAIRNESS_BUDGET
|
has_backlog && sent_since_yield >= C2ME_SENDER_FAIRNESS_BUDGET
|
||||||
}
|
}
|
||||||
|
|
@ -629,19 +608,263 @@ fn observe_me_d2c_flush_event(
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
fn relay_idle_pressure_test_guard() -> &'static Mutex<()> {
|
pub(crate) fn mark_relay_idle_candidate_for_testing(shared: &ProxySharedState, conn_id: u64) -> bool {
|
||||||
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
let registry = &shared.middle_relay.relay_idle_registry;
|
||||||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
let mut guard = match registry.lock() {
|
||||||
|
Ok(guard) => guard,
|
||||||
|
Err(poisoned) => {
|
||||||
|
let mut guard = poisoned.into_inner();
|
||||||
|
*guard = RelayIdleCandidateRegistry::default();
|
||||||
|
registry.clear_poison();
|
||||||
|
guard
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if guard.by_conn_id.contains_key(&conn_id) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mark_order_seq = shared
|
||||||
|
.middle_relay
|
||||||
|
.relay_idle_mark_seq
|
||||||
|
.fetch_add(1, Ordering::Relaxed);
|
||||||
|
let mark_pressure_seq = guard.pressure_event_seq;
|
||||||
|
let meta = RelayIdleCandidateMeta {
|
||||||
|
mark_order_seq,
|
||||||
|
mark_pressure_seq,
|
||||||
|
};
|
||||||
|
guard.by_conn_id.insert(conn_id, meta);
|
||||||
|
guard.ordered.insert((mark_order_seq, conn_id));
|
||||||
|
true
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
pub(crate) fn relay_idle_pressure_test_scope() -> std::sync::MutexGuard<'static, ()> {
|
pub(crate) fn oldest_relay_idle_candidate_for_testing(shared: &ProxySharedState) -> Option<u64> {
|
||||||
relay_idle_pressure_test_guard()
|
let registry = &shared.middle_relay.relay_idle_registry;
|
||||||
.lock()
|
let guard = match registry.lock() {
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
Ok(guard) => guard,
|
||||||
|
Err(poisoned) => {
|
||||||
|
let mut guard = poisoned.into_inner();
|
||||||
|
*guard = RelayIdleCandidateRegistry::default();
|
||||||
|
registry.clear_poison();
|
||||||
|
guard
|
||||||
|
}
|
||||||
|
};
|
||||||
|
guard.ordered.iter().next().map(|(_, conn_id)| *conn_id)
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn enqueue_c2me_command(
|
#[cfg(test)]
|
||||||
|
pub(crate) fn clear_relay_idle_candidate_for_testing(shared: &ProxySharedState, conn_id: u64) {
|
||||||
|
let registry = &shared.middle_relay.relay_idle_registry;
|
||||||
|
let mut guard = match registry.lock() {
|
||||||
|
Ok(guard) => guard,
|
||||||
|
Err(poisoned) => {
|
||||||
|
let mut guard = poisoned.into_inner();
|
||||||
|
*guard = RelayIdleCandidateRegistry::default();
|
||||||
|
registry.clear_poison();
|
||||||
|
guard
|
||||||
|
}
|
||||||
|
};
|
||||||
|
if let Some(meta) = guard.by_conn_id.remove(&conn_id) {
|
||||||
|
guard.ordered.remove(&(meta.mark_order_seq, conn_id));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn clear_relay_idle_pressure_state_for_testing_in_shared(shared: &ProxySharedState) {
|
||||||
|
if let Ok(mut guard) = shared.middle_relay.relay_idle_registry.lock() {
|
||||||
|
*guard = RelayIdleCandidateRegistry::default();
|
||||||
|
}
|
||||||
|
shared
|
||||||
|
.middle_relay
|
||||||
|
.relay_idle_mark_seq
|
||||||
|
.store(0, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn note_relay_pressure_event_for_testing(shared: &ProxySharedState) {
|
||||||
|
note_relay_pressure_event_in(shared);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn relay_pressure_event_seq_for_testing(shared: &ProxySharedState) -> u64 {
|
||||||
|
relay_pressure_event_seq_in(shared)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn relay_idle_mark_seq_for_testing(shared: &ProxySharedState) -> u64 {
|
||||||
|
shared.middle_relay.relay_idle_mark_seq.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn maybe_evict_idle_candidate_on_pressure_for_testing(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
conn_id: u64,
|
||||||
|
seen_pressure_seq: &mut u64,
|
||||||
|
stats: &Stats,
|
||||||
|
) -> bool {
|
||||||
|
maybe_evict_idle_candidate_on_pressure_in(shared, conn_id, seen_pressure_seq, stats)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn set_relay_pressure_state_for_testing(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
pressure_event_seq: u64,
|
||||||
|
pressure_consumed_seq: u64,
|
||||||
|
) {
|
||||||
|
let registry = &shared.middle_relay.relay_idle_registry;
|
||||||
|
let mut guard = match registry.lock() {
|
||||||
|
Ok(guard) => guard,
|
||||||
|
Err(poisoned) => {
|
||||||
|
let mut guard = poisoned.into_inner();
|
||||||
|
*guard = RelayIdleCandidateRegistry::default();
|
||||||
|
registry.clear_poison();
|
||||||
|
guard
|
||||||
|
}
|
||||||
|
};
|
||||||
|
guard.pressure_event_seq = pressure_event_seq;
|
||||||
|
guard.pressure_consumed_seq = pressure_consumed_seq;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn should_emit_full_desync_for_testing(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
key: u64,
|
||||||
|
all_full: bool,
|
||||||
|
now: Instant,
|
||||||
|
) -> bool {
|
||||||
|
if all_full {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
let dedup_current = &shared.middle_relay.desync_dedup;
|
||||||
|
let dedup_previous = &shared.middle_relay.desync_dedup_previous;
|
||||||
|
|
||||||
|
let Ok(mut state) = shared.middle_relay.desync_dedup_rotation_state.lock() else {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
|
||||||
|
let rotate_now = match state.current_started_at {
|
||||||
|
Some(current_started_at) => match now.checked_duration_since(current_started_at) {
|
||||||
|
Some(elapsed) => elapsed >= DESYNC_DEDUP_WINDOW,
|
||||||
|
None => true,
|
||||||
|
},
|
||||||
|
None => true,
|
||||||
|
};
|
||||||
|
if rotate_now {
|
||||||
|
dedup_previous.clear();
|
||||||
|
for entry in dedup_current.iter() {
|
||||||
|
dedup_previous.insert(*entry.key(), *entry.value());
|
||||||
|
}
|
||||||
|
dedup_current.clear();
|
||||||
|
state.current_started_at = Some(now);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(seen_at) = dedup_current.get(&key).map(|entry| *entry.value()) {
|
||||||
|
let within_window = match now.checked_duration_since(seen_at) {
|
||||||
|
Some(elapsed) => elapsed < DESYNC_DEDUP_WINDOW,
|
||||||
|
None => true,
|
||||||
|
};
|
||||||
|
if within_window {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
dedup_current.insert(key, now);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(seen_at) = dedup_previous.get(&key).map(|entry| *entry.value()) {
|
||||||
|
let within_window = match now.checked_duration_since(seen_at) {
|
||||||
|
Some(elapsed) => elapsed < DESYNC_DEDUP_WINDOW,
|
||||||
|
None => true,
|
||||||
|
};
|
||||||
|
if within_window {
|
||||||
|
dedup_current.insert(key, seen_at);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
dedup_previous.remove(&key);
|
||||||
|
}
|
||||||
|
|
||||||
|
if dedup_current.len() >= DESYNC_DEDUP_MAX_ENTRIES {
|
||||||
|
dedup_previous.clear();
|
||||||
|
for entry in dedup_current.iter() {
|
||||||
|
dedup_previous.insert(*entry.key(), *entry.value());
|
||||||
|
}
|
||||||
|
dedup_current.clear();
|
||||||
|
state.current_started_at = Some(now);
|
||||||
|
dedup_current.insert(key, now);
|
||||||
|
let Ok(mut last_emit_at) = shared.middle_relay.desync_full_cache_last_emit_at.lock() else {
|
||||||
|
return false;
|
||||||
|
};
|
||||||
|
return match *last_emit_at {
|
||||||
|
None => {
|
||||||
|
*last_emit_at = Some(now);
|
||||||
|
true
|
||||||
|
}
|
||||||
|
Some(last) => {
|
||||||
|
let Some(elapsed) = now.checked_duration_since(last) else {
|
||||||
|
*last_emit_at = Some(now);
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
if elapsed >= DESYNC_FULL_CACHE_EMIT_MIN_INTERVAL {
|
||||||
|
*last_emit_at = Some(now);
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
dedup_current.insert(key, now);
|
||||||
|
true
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn clear_desync_dedup_for_testing_in_shared(shared: &ProxySharedState) {
|
||||||
|
shared.middle_relay.desync_dedup.clear();
|
||||||
|
shared.middle_relay.desync_dedup_previous.clear();
|
||||||
|
if let Ok(mut rotation_state) = shared.middle_relay.desync_dedup_rotation_state.lock() {
|
||||||
|
*rotation_state = DesyncDedupRotationState::default();
|
||||||
|
}
|
||||||
|
if let Ok(mut last_emit_at) = shared.middle_relay.desync_full_cache_last_emit_at.lock() {
|
||||||
|
*last_emit_at = None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn desync_dedup_len_for_testing(shared: &ProxySharedState) -> usize {
|
||||||
|
shared.middle_relay.desync_dedup.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn desync_dedup_insert_for_testing(shared: &ProxySharedState, key: u64, at: Instant) {
|
||||||
|
shared.middle_relay.desync_dedup.insert(key, at);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn desync_dedup_get_for_testing(
|
||||||
|
shared: &ProxySharedState,
|
||||||
|
key: u64,
|
||||||
|
) -> Option<Instant> {
|
||||||
|
shared
|
||||||
|
.middle_relay
|
||||||
|
.desync_dedup
|
||||||
|
.get(&key)
|
||||||
|
.map(|entry| *entry.value())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub(crate) fn desync_dedup_keys_for_testing(shared: &ProxySharedState) -> std::collections::HashSet<u64> {
|
||||||
|
shared
|
||||||
|
.middle_relay
|
||||||
|
.desync_dedup
|
||||||
|
.iter()
|
||||||
|
.map(|entry| *entry.key())
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn enqueue_c2me_command_in(
|
||||||
|
shared: &ProxySharedState,
|
||||||
tx: &mpsc::Sender<C2MeCommand>,
|
tx: &mpsc::Sender<C2MeCommand>,
|
||||||
cmd: C2MeCommand,
|
cmd: C2MeCommand,
|
||||||
send_timeout: Option<Duration>,
|
send_timeout: Option<Duration>,
|
||||||
|
|
@ -653,7 +876,7 @@ async fn enqueue_c2me_command(
|
||||||
Err(mpsc::error::TrySendError::Full(cmd)) => {
|
Err(mpsc::error::TrySendError::Full(cmd)) => {
|
||||||
stats.increment_me_c2me_send_full_total();
|
stats.increment_me_c2me_send_full_total();
|
||||||
stats.increment_me_c2me_send_high_water_total();
|
stats.increment_me_c2me_send_high_water_total();
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_in(shared);
|
||||||
// Cooperative yield reduces burst catch-up when the per-conn queue is near saturation.
|
// Cooperative yield reduces burst catch-up when the per-conn queue is near saturation.
|
||||||
if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS {
|
if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS {
|
||||||
tokio::task::yield_now().await;
|
tokio::task::yield_now().await;
|
||||||
|
|
@ -682,6 +905,17 @@ async fn enqueue_c2me_command(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
async fn enqueue_c2me_command(
|
||||||
|
tx: &mpsc::Sender<C2MeCommand>,
|
||||||
|
cmd: C2MeCommand,
|
||||||
|
send_timeout: Option<Duration>,
|
||||||
|
stats: &Stats,
|
||||||
|
) -> std::result::Result<(), mpsc::error::SendError<C2MeCommand>> {
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
enqueue_c2me_command_in(shared.as_ref(), tx, cmd, send_timeout, stats).await
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
async fn run_relay_test_step_timeout<F, T>(context: &'static str, fut: F) -> T
|
async fn run_relay_test_step_timeout<F, T>(context: &'static str, fut: F) -> T
|
||||||
where
|
where
|
||||||
|
|
@ -705,6 +939,7 @@ pub(crate) async fn handle_via_middle_proxy<R, W>(
|
||||||
mut route_rx: watch::Receiver<RouteCutoverState>,
|
mut route_rx: watch::Receiver<RouteCutoverState>,
|
||||||
route_snapshot: RouteCutoverState,
|
route_snapshot: RouteCutoverState,
|
||||||
session_id: u64,
|
session_id: u64,
|
||||||
|
shared: Arc<ProxySharedState>,
|
||||||
) -> Result<()>
|
) -> Result<()>
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin + Send + 'static,
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
|
|
@ -735,7 +970,7 @@ where
|
||||||
conn_id,
|
conn_id,
|
||||||
user: user.clone(),
|
user: user.clone(),
|
||||||
peer,
|
peer,
|
||||||
peer_hash: hash_ip(peer.ip()),
|
peer_hash: hash_ip_in(shared.as_ref(), peer.ip()),
|
||||||
started_at: Instant::now(),
|
started_at: Instant::now(),
|
||||||
bytes_c2me: 0,
|
bytes_c2me: 0,
|
||||||
bytes_me2c: bytes_me2c.clone(),
|
bytes_me2c: bytes_me2c.clone(),
|
||||||
|
|
@ -1184,10 +1419,11 @@ where
|
||||||
let mut client_closed = false;
|
let mut client_closed = false;
|
||||||
let mut frame_counter: u64 = 0;
|
let mut frame_counter: u64 = 0;
|
||||||
let mut route_watch_open = true;
|
let mut route_watch_open = true;
|
||||||
let mut seen_pressure_seq = relay_pressure_event_seq();
|
let mut seen_pressure_seq = relay_pressure_event_seq_in(shared.as_ref());
|
||||||
loop {
|
loop {
|
||||||
if relay_idle_policy.enabled
|
if relay_idle_policy.enabled
|
||||||
&& maybe_evict_idle_candidate_on_pressure(
|
&& maybe_evict_idle_candidate_on_pressure_in(
|
||||||
|
shared.as_ref(),
|
||||||
conn_id,
|
conn_id,
|
||||||
&mut seen_pressure_seq,
|
&mut seen_pressure_seq,
|
||||||
stats.as_ref(),
|
stats.as_ref(),
|
||||||
|
|
@ -1199,7 +1435,8 @@ where
|
||||||
user = %user,
|
user = %user,
|
||||||
"Middle-relay pressure eviction for idle-candidate session"
|
"Middle-relay pressure eviction for idle-candidate session"
|
||||||
);
|
);
|
||||||
let _ = enqueue_c2me_command(
|
let _ = enqueue_c2me_command_in(
|
||||||
|
shared.as_ref(),
|
||||||
&c2me_tx,
|
&c2me_tx,
|
||||||
C2MeCommand::Close,
|
C2MeCommand::Close,
|
||||||
c2me_send_timeout,
|
c2me_send_timeout,
|
||||||
|
|
@ -1224,7 +1461,8 @@ where
|
||||||
"Cutover affected middle session, closing client connection"
|
"Cutover affected middle session, closing client connection"
|
||||||
);
|
);
|
||||||
tokio::time::sleep(delay).await;
|
tokio::time::sleep(delay).await;
|
||||||
let _ = enqueue_c2me_command(
|
let _ = enqueue_c2me_command_in(
|
||||||
|
shared.as_ref(),
|
||||||
&c2me_tx,
|
&c2me_tx,
|
||||||
C2MeCommand::Close,
|
C2MeCommand::Close,
|
||||||
c2me_send_timeout,
|
c2me_send_timeout,
|
||||||
|
|
@ -1241,7 +1479,7 @@ where
|
||||||
route_watch_open = false;
|
route_watch_open = false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
payload_result = read_client_payload_with_idle_policy(
|
payload_result = read_client_payload_with_idle_policy_in(
|
||||||
&mut crypto_reader,
|
&mut crypto_reader,
|
||||||
proto_tag,
|
proto_tag,
|
||||||
frame_limit,
|
frame_limit,
|
||||||
|
|
@ -1249,6 +1487,7 @@ where
|
||||||
&forensics,
|
&forensics,
|
||||||
&mut frame_counter,
|
&mut frame_counter,
|
||||||
&stats,
|
&stats,
|
||||||
|
shared.as_ref(),
|
||||||
&relay_idle_policy,
|
&relay_idle_policy,
|
||||||
&mut relay_idle_state,
|
&mut relay_idle_state,
|
||||||
last_downstream_activity_ms.as_ref(),
|
last_downstream_activity_ms.as_ref(),
|
||||||
|
|
@ -1288,7 +1527,8 @@ where
|
||||||
flags |= RPC_FLAG_NOT_ENCRYPTED;
|
flags |= RPC_FLAG_NOT_ENCRYPTED;
|
||||||
}
|
}
|
||||||
// Keep client read loop lightweight: route heavy ME send path via a dedicated task.
|
// Keep client read loop lightweight: route heavy ME send path via a dedicated task.
|
||||||
if enqueue_c2me_command(
|
if enqueue_c2me_command_in(
|
||||||
|
shared.as_ref(),
|
||||||
&c2me_tx,
|
&c2me_tx,
|
||||||
C2MeCommand::Data { payload, flags },
|
C2MeCommand::Data { payload, flags },
|
||||||
c2me_send_timeout,
|
c2me_send_timeout,
|
||||||
|
|
@ -1304,7 +1544,8 @@ where
|
||||||
Ok(None) => {
|
Ok(None) => {
|
||||||
debug!(conn_id, "Client EOF");
|
debug!(conn_id, "Client EOF");
|
||||||
client_closed = true;
|
client_closed = true;
|
||||||
let _ = enqueue_c2me_command(
|
let _ = enqueue_c2me_command_in(
|
||||||
|
shared.as_ref(),
|
||||||
&c2me_tx,
|
&c2me_tx,
|
||||||
C2MeCommand::Close,
|
C2MeCommand::Close,
|
||||||
c2me_send_timeout,
|
c2me_send_timeout,
|
||||||
|
|
@ -1359,7 +1600,7 @@ where
|
||||||
frames_ok = frame_counter,
|
frames_ok = frame_counter,
|
||||||
"ME relay cleanup"
|
"ME relay cleanup"
|
||||||
);
|
);
|
||||||
clear_relay_idle_candidate(conn_id);
|
clear_relay_idle_candidate_in(shared.as_ref(), conn_id);
|
||||||
me_pool.registry().unregister(conn_id).await;
|
me_pool.registry().unregister(conn_id).await;
|
||||||
buffer_pool.trim_to(buffer_pool.max_buffers().min(64));
|
buffer_pool.trim_to(buffer_pool.max_buffers().min(64));
|
||||||
let pool_snapshot = buffer_pool.stats();
|
let pool_snapshot = buffer_pool.stats();
|
||||||
|
|
@ -1371,7 +1612,7 @@ where
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn read_client_payload_with_idle_policy<R>(
|
async fn read_client_payload_with_idle_policy_in<R>(
|
||||||
client_reader: &mut CryptoReader<R>,
|
client_reader: &mut CryptoReader<R>,
|
||||||
proto_tag: ProtoTag,
|
proto_tag: ProtoTag,
|
||||||
max_frame: usize,
|
max_frame: usize,
|
||||||
|
|
@ -1379,6 +1620,7 @@ async fn read_client_payload_with_idle_policy<R>(
|
||||||
forensics: &RelayForensicsState,
|
forensics: &RelayForensicsState,
|
||||||
frame_counter: &mut u64,
|
frame_counter: &mut u64,
|
||||||
stats: &Stats,
|
stats: &Stats,
|
||||||
|
shared: &ProxySharedState,
|
||||||
idle_policy: &RelayClientIdlePolicy,
|
idle_policy: &RelayClientIdlePolicy,
|
||||||
idle_state: &mut RelayClientIdleState,
|
idle_state: &mut RelayClientIdleState,
|
||||||
last_downstream_activity_ms: &AtomicU64,
|
last_downstream_activity_ms: &AtomicU64,
|
||||||
|
|
@ -1398,6 +1640,7 @@ where
|
||||||
session_started_at: Instant,
|
session_started_at: Instant,
|
||||||
forensics: &RelayForensicsState,
|
forensics: &RelayForensicsState,
|
||||||
stats: &Stats,
|
stats: &Stats,
|
||||||
|
shared: &ProxySharedState,
|
||||||
read_label: &'static str,
|
read_label: &'static str,
|
||||||
) -> Result<()>
|
) -> Result<()>
|
||||||
where
|
where
|
||||||
|
|
@ -1433,7 +1676,7 @@ where
|
||||||
let hard_deadline =
|
let hard_deadline =
|
||||||
hard_deadline(idle_policy, idle_state, session_started_at, downstream_ms);
|
hard_deadline(idle_policy, idle_state, session_started_at, downstream_ms);
|
||||||
if now >= hard_deadline {
|
if now >= hard_deadline {
|
||||||
clear_relay_idle_candidate(forensics.conn_id);
|
clear_relay_idle_candidate_in(shared, forensics.conn_id);
|
||||||
stats.increment_relay_idle_hard_close_total();
|
stats.increment_relay_idle_hard_close_total();
|
||||||
let client_idle_secs = now
|
let client_idle_secs = now
|
||||||
.saturating_duration_since(idle_state.last_client_frame_at)
|
.saturating_duration_since(idle_state.last_client_frame_at)
|
||||||
|
|
@ -1471,7 +1714,7 @@ where
|
||||||
>= idle_policy.soft_idle
|
>= idle_policy.soft_idle
|
||||||
{
|
{
|
||||||
idle_state.soft_idle_marked = true;
|
idle_state.soft_idle_marked = true;
|
||||||
if mark_relay_idle_candidate(forensics.conn_id) {
|
if mark_relay_idle_candidate_in(shared, forensics.conn_id) {
|
||||||
stats.increment_relay_idle_soft_mark_total();
|
stats.increment_relay_idle_soft_mark_total();
|
||||||
}
|
}
|
||||||
info!(
|
info!(
|
||||||
|
|
@ -1541,6 +1784,7 @@ where
|
||||||
session_started_at,
|
session_started_at,
|
||||||
forensics,
|
forensics,
|
||||||
stats,
|
stats,
|
||||||
|
shared,
|
||||||
"abridged.first_len_byte",
|
"abridged.first_len_byte",
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
|
|
@ -1564,6 +1808,7 @@ where
|
||||||
session_started_at,
|
session_started_at,
|
||||||
forensics,
|
forensics,
|
||||||
stats,
|
stats,
|
||||||
|
shared,
|
||||||
"abridged.extended_len",
|
"abridged.extended_len",
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
@ -1588,6 +1833,7 @@ where
|
||||||
session_started_at,
|
session_started_at,
|
||||||
forensics,
|
forensics,
|
||||||
stats,
|
stats,
|
||||||
|
shared,
|
||||||
"len_prefix",
|
"len_prefix",
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
|
|
@ -1644,7 +1890,8 @@ where
|
||||||
}
|
}
|
||||||
|
|
||||||
if len > max_frame {
|
if len > max_frame {
|
||||||
return Err(report_desync_frame_too_large(
|
return Err(report_desync_frame_too_large_in(
|
||||||
|
shared,
|
||||||
forensics,
|
forensics,
|
||||||
proto_tag,
|
proto_tag,
|
||||||
*frame_counter,
|
*frame_counter,
|
||||||
|
|
@ -1686,6 +1933,7 @@ where
|
||||||
session_started_at,
|
session_started_at,
|
||||||
forensics,
|
forensics,
|
||||||
stats,
|
stats,
|
||||||
|
shared,
|
||||||
"payload",
|
"payload",
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
@ -1697,11 +1945,46 @@ where
|
||||||
*frame_counter += 1;
|
*frame_counter += 1;
|
||||||
idle_state.on_client_frame(Instant::now());
|
idle_state.on_client_frame(Instant::now());
|
||||||
idle_state.tiny_frame_debt = idle_state.tiny_frame_debt.saturating_sub(1);
|
idle_state.tiny_frame_debt = idle_state.tiny_frame_debt.saturating_sub(1);
|
||||||
clear_relay_idle_candidate(forensics.conn_id);
|
clear_relay_idle_candidate_in(shared, forensics.conn_id);
|
||||||
return Ok(Some((payload, quickack)));
|
return Ok(Some((payload, quickack)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
async fn read_client_payload_with_idle_policy<R>(
|
||||||
|
client_reader: &mut CryptoReader<R>,
|
||||||
|
proto_tag: ProtoTag,
|
||||||
|
max_frame: usize,
|
||||||
|
buffer_pool: &Arc<BufferPool>,
|
||||||
|
forensics: &RelayForensicsState,
|
||||||
|
frame_counter: &mut u64,
|
||||||
|
stats: &Stats,
|
||||||
|
idle_policy: &RelayClientIdlePolicy,
|
||||||
|
idle_state: &mut RelayClientIdleState,
|
||||||
|
last_downstream_activity_ms: &AtomicU64,
|
||||||
|
session_started_at: Instant,
|
||||||
|
) -> Result<Option<(PooledBuffer, bool)>>
|
||||||
|
where
|
||||||
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
|
{
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
read_client_payload_with_idle_policy_in(
|
||||||
|
client_reader,
|
||||||
|
proto_tag,
|
||||||
|
max_frame,
|
||||||
|
buffer_pool,
|
||||||
|
forensics,
|
||||||
|
frame_counter,
|
||||||
|
stats,
|
||||||
|
shared.as_ref(),
|
||||||
|
idle_policy,
|
||||||
|
idle_state,
|
||||||
|
last_downstream_activity_ms,
|
||||||
|
session_started_at,
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
async fn read_client_payload_legacy<R>(
|
async fn read_client_payload_legacy<R>(
|
||||||
client_reader: &mut CryptoReader<R>,
|
client_reader: &mut CryptoReader<R>,
|
||||||
|
|
@ -1717,10 +2000,11 @@ where
|
||||||
R: AsyncRead + Unpin + Send + 'static,
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
{
|
{
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
let mut idle_state = RelayClientIdleState::new(now);
|
let mut idle_state = RelayClientIdleState::new(now);
|
||||||
let last_downstream_activity_ms = AtomicU64::new(0);
|
let last_downstream_activity_ms = AtomicU64::new(0);
|
||||||
let idle_policy = RelayClientIdlePolicy::disabled(frame_read_timeout);
|
let idle_policy = RelayClientIdlePolicy::disabled(frame_read_timeout);
|
||||||
read_client_payload_with_idle_policy(
|
read_client_payload_with_idle_policy_in(
|
||||||
client_reader,
|
client_reader,
|
||||||
proto_tag,
|
proto_tag,
|
||||||
max_frame,
|
max_frame,
|
||||||
|
|
@ -1728,6 +2012,7 @@ where
|
||||||
forensics,
|
forensics,
|
||||||
frame_counter,
|
frame_counter,
|
||||||
stats,
|
stats,
|
||||||
|
shared.as_ref(),
|
||||||
&idle_policy,
|
&idle_policy,
|
||||||
&mut idle_state,
|
&mut idle_state,
|
||||||
&last_downstream_activity_ms,
|
&last_downstream_activity_ms,
|
||||||
|
|
|
||||||
|
|
@ -67,6 +67,7 @@ pub mod middle_relay;
|
||||||
pub mod relay;
|
pub mod relay;
|
||||||
pub mod route_mode;
|
pub mod route_mode;
|
||||||
pub mod session_eviction;
|
pub mod session_eviction;
|
||||||
|
pub mod shared_state;
|
||||||
|
|
||||||
pub use client::ClientHandler;
|
pub use client::ClientHandler;
|
||||||
#[allow(unused_imports)]
|
#[allow(unused_imports)]
|
||||||
|
|
@ -79,3 +80,11 @@ pub use relay::*;
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
#[path = "tests/test_harness_common.rs"]
|
#[path = "tests/test_harness_common.rs"]
|
||||||
mod test_harness_common;
|
mod test_harness_common;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
#[path = "tests/proxy_shared_state_isolation_tests.rs"]
|
||||||
|
mod proxy_shared_state_isolation_tests;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
#[path = "tests/proxy_shared_state_parallel_execution_tests.rs"]
|
||||||
|
mod proxy_shared_state_parallel_execution_tests;
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,57 @@
|
||||||
|
use std::collections::HashSet;
|
||||||
|
use std::collections::hash_map::RandomState;
|
||||||
|
use std::net::IpAddr;
|
||||||
|
use std::sync::atomic::AtomicU64;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::time::Instant;
|
||||||
|
|
||||||
|
use dashmap::DashMap;
|
||||||
|
|
||||||
|
use crate::proxy::handshake::{AuthProbeState, AuthProbeSaturationState};
|
||||||
|
use crate::proxy::middle_relay::{DesyncDedupRotationState, RelayIdleCandidateRegistry};
|
||||||
|
|
||||||
|
pub(crate) struct HandshakeSharedState {
|
||||||
|
pub(crate) auth_probe: DashMap<IpAddr, AuthProbeState>,
|
||||||
|
pub(crate) auth_probe_saturation: Mutex<Option<AuthProbeSaturationState>>,
|
||||||
|
pub(crate) auth_probe_eviction_hasher: RandomState,
|
||||||
|
pub(crate) invalid_secret_warned: Mutex<HashSet<(String, String)>>,
|
||||||
|
pub(crate) unknown_sni_warn_next_allowed: Mutex<Option<Instant>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct MiddleRelaySharedState {
|
||||||
|
pub(crate) desync_dedup: DashMap<u64, Instant>,
|
||||||
|
pub(crate) desync_dedup_previous: DashMap<u64, Instant>,
|
||||||
|
pub(crate) desync_hasher: RandomState,
|
||||||
|
pub(crate) desync_full_cache_last_emit_at: Mutex<Option<Instant>>,
|
||||||
|
pub(crate) desync_dedup_rotation_state: Mutex<DesyncDedupRotationState>,
|
||||||
|
pub(crate) relay_idle_registry: Mutex<RelayIdleCandidateRegistry>,
|
||||||
|
pub(crate) relay_idle_mark_seq: AtomicU64,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct ProxySharedState {
|
||||||
|
pub(crate) handshake: HandshakeSharedState,
|
||||||
|
pub(crate) middle_relay: MiddleRelaySharedState,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ProxySharedState {
|
||||||
|
pub(crate) fn new() -> Arc<Self> {
|
||||||
|
Arc::new(Self {
|
||||||
|
handshake: HandshakeSharedState {
|
||||||
|
auth_probe: DashMap::new(),
|
||||||
|
auth_probe_saturation: Mutex::new(None),
|
||||||
|
auth_probe_eviction_hasher: RandomState::new(),
|
||||||
|
invalid_secret_warned: Mutex::new(HashSet::new()),
|
||||||
|
unknown_sni_warn_next_allowed: Mutex::new(None),
|
||||||
|
},
|
||||||
|
middle_relay: MiddleRelaySharedState {
|
||||||
|
desync_dedup: DashMap::new(),
|
||||||
|
desync_dedup_previous: DashMap::new(),
|
||||||
|
desync_hasher: RandomState::new(),
|
||||||
|
desync_full_cache_last_emit_at: Mutex::new(None),
|
||||||
|
desync_dedup_rotation_state: Mutex::new(DesyncDedupRotationState::default()),
|
||||||
|
relay_idle_registry: Mutex::new(RelayIdleCandidateRegistry::default()),
|
||||||
|
relay_idle_mark_seq: AtomicU64::new(0),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -7,12 +7,6 @@ use std::time::{Duration, Instant};
|
||||||
|
|
||||||
// --- Helpers ---
|
// --- Helpers ---
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
||||||
let mut cfg = ProxyConfig::default();
|
let mut cfg = ProxyConfig::default();
|
||||||
cfg.access.users.clear();
|
cfg.access.users.clear();
|
||||||
|
|
@ -147,8 +141,8 @@ fn make_valid_tls_client_hello_with_alpn(
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn tls_minimum_viable_length_boundary() {
|
async fn tls_minimum_viable_length_boundary() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x11u8; 16];
|
let secret = [0x11u8; 16];
|
||||||
let config = test_config_with_secret_hex("11111111111111111111111111111111");
|
let config = test_config_with_secret_hex("11111111111111111111111111111111");
|
||||||
|
|
@ -200,8 +194,8 @@ async fn tls_minimum_viable_length_boundary() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_extreme_dc_index_serialization() {
|
async fn mtproto_extreme_dc_index_serialization() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "22222222222222222222222222222222";
|
let secret_hex = "22222222222222222222222222222222";
|
||||||
let config = test_config_with_secret_hex(secret_hex);
|
let config = test_config_with_secret_hex(secret_hex);
|
||||||
|
|
@ -241,8 +235,8 @@ async fn mtproto_extreme_dc_index_serialization() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn alpn_strict_case_and_padding_rejection() {
|
async fn alpn_strict_case_and_padding_rejection() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x33u8; 16];
|
let secret = [0x33u8; 16];
|
||||||
let mut config = test_config_with_secret_hex("33333333333333333333333333333333");
|
let mut config = test_config_with_secret_hex("33333333333333333333333333333333");
|
||||||
|
|
@ -297,8 +291,8 @@ fn ipv4_mapped_ipv6_bucketing_anomaly() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_invalid_ciphertext_does_not_poison_replay_cache() {
|
async fn mtproto_invalid_ciphertext_does_not_poison_replay_cache() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "55555555555555555555555555555555";
|
let secret_hex = "55555555555555555555555555555555";
|
||||||
let config = test_config_with_secret_hex(secret_hex);
|
let config = test_config_with_secret_hex(secret_hex);
|
||||||
|
|
@ -341,8 +335,8 @@ async fn mtproto_invalid_ciphertext_does_not_poison_replay_cache() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn tls_invalid_session_does_not_poison_replay_cache() {
|
async fn tls_invalid_session_does_not_poison_replay_cache() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x66u8; 16];
|
let secret = [0x66u8; 16];
|
||||||
let config = test_config_with_secret_hex("66666666666666666666666666666666");
|
let config = test_config_with_secret_hex("66666666666666666666666666666666");
|
||||||
|
|
@ -387,8 +381,8 @@ async fn tls_invalid_session_does_not_poison_replay_cache() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn server_hello_delay_timing_neutrality_on_hmac_failure() {
|
async fn server_hello_delay_timing_neutrality_on_hmac_failure() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x77u8; 16];
|
let secret = [0x77u8; 16];
|
||||||
let mut config = test_config_with_secret_hex("77777777777777777777777777777777");
|
let mut config = test_config_with_secret_hex("77777777777777777777777777777777");
|
||||||
|
|
@ -425,8 +419,8 @@ async fn server_hello_delay_timing_neutrality_on_hmac_failure() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn server_hello_delay_inversion_resilience() {
|
async fn server_hello_delay_inversion_resilience() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x88u8; 16];
|
let secret = [0x88u8; 16];
|
||||||
let mut config = test_config_with_secret_hex("88888888888888888888888888888888");
|
let mut config = test_config_with_secret_hex("88888888888888888888888888888888");
|
||||||
|
|
@ -462,10 +456,9 @@ async fn server_hello_delay_inversion_resilience() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mixed_valid_and_invalid_user_secrets_configuration() {
|
async fn mixed_valid_and_invalid_user_secrets_configuration() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
let _warn_guard = warned_secrets_test_lock().lock().unwrap();
|
clear_warned_secrets_for_testing_in_shared(shared.as_ref());
|
||||||
clear_warned_secrets_for_testing();
|
|
||||||
|
|
||||||
let mut config = ProxyConfig::default();
|
let mut config = ProxyConfig::default();
|
||||||
config.access.ignore_time_skew = true;
|
config.access.ignore_time_skew = true;
|
||||||
|
|
@ -513,8 +506,8 @@ async fn mixed_valid_and_invalid_user_secrets_configuration() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn tls_emulation_fallback_when_cache_missing() {
|
async fn tls_emulation_fallback_when_cache_missing() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0xAAu8; 16];
|
let secret = [0xAAu8; 16];
|
||||||
let mut config = test_config_with_secret_hex("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
|
let mut config = test_config_with_secret_hex("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
|
||||||
|
|
@ -547,8 +540,8 @@ async fn tls_emulation_fallback_when_cache_missing() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn classic_mode_over_tls_transport_protocol_confusion() {
|
async fn classic_mode_over_tls_transport_protocol_confusion() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb";
|
let secret_hex = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb";
|
||||||
let mut config = test_config_with_secret_hex(secret_hex);
|
let mut config = test_config_with_secret_hex(secret_hex);
|
||||||
|
|
@ -608,8 +601,8 @@ fn generate_tg_nonce_never_emits_reserved_bytes() {
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn dashmap_concurrent_saturation_stress() {
|
async fn dashmap_concurrent_saturation_stress() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let ip_a: IpAddr = "192.0.2.13".parse().unwrap();
|
let ip_a: IpAddr = "192.0.2.13".parse().unwrap();
|
||||||
let ip_b: IpAddr = "198.51.100.13".parse().unwrap();
|
let ip_b: IpAddr = "198.51.100.13".parse().unwrap();
|
||||||
|
|
@ -617,9 +610,10 @@ async fn dashmap_concurrent_saturation_stress() {
|
||||||
|
|
||||||
for i in 0..100 {
|
for i in 0..100 {
|
||||||
let target_ip = if i % 2 == 0 { ip_a } else { ip_b };
|
let target_ip = if i % 2 == 0 { ip_a } else { ip_b };
|
||||||
|
let shared = shared.clone();
|
||||||
tasks.push(tokio::spawn(async move {
|
tasks.push(tokio::spawn(async move {
|
||||||
for _ in 0..50 {
|
for _ in 0..50 {
|
||||||
auth_probe_record_failure(target_ip, Instant::now());
|
auth_probe_record_failure_in(shared.as_ref(), target_ip, Instant::now());
|
||||||
}
|
}
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
@ -630,11 +624,11 @@ async fn dashmap_concurrent_saturation_stress() {
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
auth_probe_is_throttled_for_testing(ip_a),
|
auth_probe_is_throttled_for_testing_in_shared(shared.as_ref(), ip_a),
|
||||||
"IP A must be throttled after concurrent stress"
|
"IP A must be throttled after concurrent stress"
|
||||||
);
|
);
|
||||||
assert!(
|
assert!(
|
||||||
auth_probe_is_throttled_for_testing(ip_b),
|
auth_probe_is_throttled_for_testing_in_shared(shared.as_ref(), ip_b),
|
||||||
"IP B must be throttled after concurrent stress"
|
"IP B must be throttled after concurrent stress"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
@ -661,15 +655,15 @@ fn prototag_invalid_bytes_fail_closed() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn auth_probe_eviction_hash_collision_stress() {
|
fn auth_probe_eviction_hash_collision_stress() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let state = auth_probe_state_map();
|
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
for i in 0..10_000u32 {
|
for i in 0..10_000u32 {
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(10, 0, (i >> 8) as u8, (i & 0xFF) as u8));
|
let ip = IpAddr::V4(Ipv4Addr::new(10, 0, (i >> 8) as u8, (i & 0xFF) as u8));
|
||||||
auth_probe_record_failure_with_state(state, ip, now);
|
auth_probe_record_failure_with_state_in(shared.as_ref(), state, ip, now);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
|
|
|
||||||
|
|
@ -44,12 +44,6 @@ fn make_valid_mtproto_handshake(
|
||||||
handshake
|
handshake
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
||||||
let mut cfg = ProxyConfig::default();
|
let mut cfg = ProxyConfig::default();
|
||||||
cfg.access.users.clear();
|
cfg.access.users.clear();
|
||||||
|
|
@ -67,8 +61,8 @@ fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_handshake_bit_flip_anywhere_rejected() {
|
async fn mtproto_handshake_bit_flip_anywhere_rejected() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "11223344556677889900aabbccddeeff";
|
let secret_hex = "11223344556677889900aabbccddeeff";
|
||||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||||
|
|
@ -181,26 +175,26 @@ async fn mtproto_handshake_timing_neutrality_mocked() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn auth_probe_throttle_saturation_stress() {
|
async fn auth_probe_throttle_saturation_stress() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
// Record enough failures for one IP to trigger backoff
|
// Record enough failures for one IP to trigger backoff
|
||||||
let target_ip = IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1));
|
let target_ip = IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1));
|
||||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||||
auth_probe_record_failure(target_ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), target_ip, now);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(auth_probe_is_throttled(target_ip, now));
|
assert!(auth_probe_is_throttled_in(shared.as_ref(), target_ip, now));
|
||||||
|
|
||||||
// Stress test with many unique IPs
|
// Stress test with many unique IPs
|
||||||
for i in 0..500u32 {
|
for i in 0..500u32 {
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, (i % 256) as u8));
|
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, (i % 256) as u8));
|
||||||
auth_probe_record_failure(ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||||
}
|
}
|
||||||
|
|
||||||
let tracked = AUTH_PROBE_STATE.get().map(|state| state.len()).unwrap_or(0);
|
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
|
||||||
assert!(
|
assert!(
|
||||||
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||||
"auth probe state grew past hard cap: {tracked} > {AUTH_PROBE_TRACK_MAX_ENTRIES}"
|
"auth probe state grew past hard cap: {tracked} > {AUTH_PROBE_TRACK_MAX_ENTRIES}"
|
||||||
|
|
@ -209,8 +203,8 @@ async fn auth_probe_throttle_saturation_stress() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_handshake_abridged_prefix_rejected() {
|
async fn mtproto_handshake_abridged_prefix_rejected() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let mut handshake = [0x5Au8; HANDSHAKE_LEN];
|
let mut handshake = [0x5Au8; HANDSHAKE_LEN];
|
||||||
handshake[0] = 0xef; // Abridged prefix
|
handshake[0] = 0xef; // Abridged prefix
|
||||||
|
|
@ -235,8 +229,8 @@ async fn mtproto_handshake_abridged_prefix_rejected() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_handshake_preferred_user_mismatch_continues() {
|
async fn mtproto_handshake_preferred_user_mismatch_continues() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret1_hex = "11111111111111111111111111111111";
|
let secret1_hex = "11111111111111111111111111111111";
|
||||||
let secret2_hex = "22222222222222222222222222222222";
|
let secret2_hex = "22222222222222222222222222222222";
|
||||||
|
|
@ -278,8 +272,8 @@ async fn mtproto_handshake_preferred_user_mismatch_continues() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_handshake_concurrent_flood_stability() {
|
async fn mtproto_handshake_concurrent_flood_stability() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "00112233445566778899aabbccddeeff";
|
let secret_hex = "00112233445566778899aabbccddeeff";
|
||||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
|
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
|
||||||
|
|
@ -320,8 +314,8 @@ async fn mtproto_handshake_concurrent_flood_stability() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_replay_is_rejected_across_distinct_peers() {
|
async fn mtproto_replay_is_rejected_across_distinct_peers() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "0123456789abcdeffedcba9876543210";
|
let secret_hex = "0123456789abcdeffedcba9876543210";
|
||||||
let handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
let handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||||
|
|
@ -360,8 +354,8 @@ async fn mtproto_replay_is_rejected_across_distinct_peers() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() {
|
async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "89abcdef012345670123456789abcdef";
|
let secret_hex = "89abcdef012345670123456789abcdef";
|
||||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||||
|
|
@ -405,27 +399,27 @@ async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn auth_probe_success_clears_throttled_peer_state() {
|
async fn auth_probe_success_clears_throttled_peer_state() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let target_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 90));
|
let target_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 90));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||||
auth_probe_record_failure(target_ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), target_ip, now);
|
||||||
}
|
}
|
||||||
assert!(auth_probe_is_throttled(target_ip, now));
|
assert!(auth_probe_is_throttled_in(shared.as_ref(), target_ip, now));
|
||||||
|
|
||||||
auth_probe_record_success(target_ip);
|
auth_probe_record_success_in(shared.as_ref(), target_ip);
|
||||||
assert!(
|
assert!(
|
||||||
!auth_probe_is_throttled(target_ip, now + Duration::from_millis(1)),
|
!auth_probe_is_throttled_in(shared.as_ref(), target_ip, now + Duration::from_millis(1)),
|
||||||
"successful auth must clear per-peer throttle state"
|
"successful auth must clear per-peer throttle state"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
|
async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "00112233445566778899aabbccddeeff";
|
let secret_hex = "00112233445566778899aabbccddeeff";
|
||||||
let mut invalid = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
let mut invalid = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||||
|
|
@ -458,7 +452,7 @@ async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
|
||||||
assert!(matches!(res, HandshakeResult::BadClient { .. }));
|
assert!(matches!(res, HandshakeResult::BadClient { .. }));
|
||||||
}
|
}
|
||||||
|
|
||||||
let tracked = AUTH_PROBE_STATE.get().map(|state| state.len()).unwrap_or(0);
|
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
|
||||||
assert!(
|
assert!(
|
||||||
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||||
"probe map must remain bounded under invalid storm: {tracked}"
|
"probe map must remain bounded under invalid storm: {tracked}"
|
||||||
|
|
@ -467,8 +461,8 @@ async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() {
|
async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "f0e1d2c3b4a5968778695a4b3c2d1e0f";
|
let secret_hex = "f0e1d2c3b4a5968778695a4b3c2d1e0f";
|
||||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||||
|
|
@ -520,8 +514,8 @@ async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() {
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "heavy soak; run manually"]
|
#[ignore = "heavy soak; run manually"]
|
||||||
async fn mtproto_blackhat_20k_mutation_soak_never_panics() {
|
async fn mtproto_blackhat_20k_mutation_soak_never_panics() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
|
let secret_hex = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
|
||||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||||
|
|
|
||||||
|
|
@ -3,15 +3,9 @@ use std::collections::HashSet;
|
||||||
use std::net::{IpAddr, Ipv4Addr};
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn adversarial_large_state_offsets_escape_first_scan_window() {
|
fn adversarial_large_state_offsets_escape_first_scan_window() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
let state_len = 65_536usize;
|
let state_len = 65_536usize;
|
||||||
let scan_limit = 1_024usize;
|
let scan_limit = 1_024usize;
|
||||||
|
|
@ -25,7 +19,7 @@ fn adversarial_large_state_offsets_escape_first_scan_window() {
|
||||||
((i.wrapping_mul(131)) & 0xff) as u8,
|
((i.wrapping_mul(131)) & 0xff) as u8,
|
||||||
));
|
));
|
||||||
let now = base + Duration::from_nanos(i);
|
let now = base + Duration::from_nanos(i);
|
||||||
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
|
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||||
if start >= scan_limit {
|
if start >= scan_limit {
|
||||||
saw_offset_outside_first_window = true;
|
saw_offset_outside_first_window = true;
|
||||||
break;
|
break;
|
||||||
|
|
@ -40,7 +34,7 @@ fn adversarial_large_state_offsets_escape_first_scan_window() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn stress_large_state_offsets_cover_many_scan_windows() {
|
fn stress_large_state_offsets_cover_many_scan_windows() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
let state_len = 65_536usize;
|
let state_len = 65_536usize;
|
||||||
let scan_limit = 1_024usize;
|
let scan_limit = 1_024usize;
|
||||||
|
|
@ -54,7 +48,7 @@ fn stress_large_state_offsets_cover_many_scan_windows() {
|
||||||
((i.wrapping_mul(17)) & 0xff) as u8,
|
((i.wrapping_mul(17)) & 0xff) as u8,
|
||||||
));
|
));
|
||||||
let now = base + Duration::from_micros(i);
|
let now = base + Duration::from_micros(i);
|
||||||
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
|
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||||
covered_windows.insert(start / scan_limit);
|
covered_windows.insert(start / scan_limit);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -68,7 +62,7 @@ fn stress_large_state_offsets_cover_many_scan_windows() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn light_fuzz_offset_always_stays_inside_state_len() {
|
fn light_fuzz_offset_always_stays_inside_state_len() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let mut seed = 0xC0FF_EE12_3456_789Au64;
|
let mut seed = 0xC0FF_EE12_3456_789Au64;
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
|
|
||||||
|
|
@ -86,7 +80,7 @@ fn light_fuzz_offset_always_stays_inside_state_len() {
|
||||||
let state_len = ((seed >> 16) as usize % 200_000).saturating_add(1);
|
let state_len = ((seed >> 16) as usize % 200_000).saturating_add(1);
|
||||||
let scan_limit = ((seed >> 40) as usize % 2_048).saturating_add(1);
|
let scan_limit = ((seed >> 40) as usize % 2_048).saturating_add(1);
|
||||||
let now = base + Duration::from_nanos(seed & 0x0fff);
|
let now = base + Duration::from_nanos(seed & 0x0fff);
|
||||||
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
|
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
start < state_len,
|
start < state_len,
|
||||||
|
|
|
||||||
|
|
@ -2,68 +2,62 @@ use super::*;
|
||||||
use std::net::{IpAddr, Ipv4Addr};
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn positive_preauth_throttle_activates_after_failure_threshold() {
|
fn positive_preauth_throttle_activates_after_failure_threshold() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 20));
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 20));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||||
auth_probe_record_failure(ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
auth_probe_is_throttled(ip, now),
|
auth_probe_is_throttled_in(shared.as_ref(), ip, now),
|
||||||
"peer must be throttled once fail streak reaches threshold"
|
"peer must be throttled once fail streak reaches threshold"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn negative_unrelated_peer_remains_unthrottled() {
|
fn negative_unrelated_peer_remains_unthrottled() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let attacker = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 12));
|
let attacker = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 12));
|
||||||
let benign = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 13));
|
let benign = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 13));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||||
auth_probe_record_failure(attacker, now);
|
auth_probe_record_failure_in(shared.as_ref(), attacker, now);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(auth_probe_is_throttled(attacker, now));
|
assert!(auth_probe_is_throttled_in(shared.as_ref(), attacker, now));
|
||||||
assert!(
|
assert!(
|
||||||
!auth_probe_is_throttled(benign, now),
|
!auth_probe_is_throttled_in(shared.as_ref(), benign, now),
|
||||||
"throttle state must stay scoped to normalized peer key"
|
"throttle state must stay scoped to normalized peer key"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn edge_expired_entry_is_pruned_and_no_longer_throttled() {
|
fn edge_expired_entry_is_pruned_and_no_longer_throttled() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 41));
|
let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 41));
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||||
auth_probe_record_failure(ip, base);
|
auth_probe_record_failure_in(shared.as_ref(), ip, base);
|
||||||
}
|
}
|
||||||
|
|
||||||
let expired_at = base + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 1);
|
let expired_at = base + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 1);
|
||||||
assert!(
|
assert!(
|
||||||
!auth_probe_is_throttled(ip, expired_at),
|
!auth_probe_is_throttled_in(shared.as_ref(), ip, expired_at),
|
||||||
"expired entries must not keep throttling peers"
|
"expired entries must not keep throttling peers"
|
||||||
);
|
);
|
||||||
|
|
||||||
let state = auth_probe_state_map();
|
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
assert!(
|
assert!(
|
||||||
state.get(&normalize_auth_probe_ip(ip)).is_none(),
|
state.get(&normalize_auth_probe_ip(ip)).is_none(),
|
||||||
"expired lookup should prune stale state"
|
"expired lookup should prune stale state"
|
||||||
|
|
@ -72,36 +66,36 @@ fn edge_expired_entry_is_pruned_and_no_longer_throttled() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn adversarial_saturation_grace_requires_extra_failures_before_preauth_throttle() {
|
fn adversarial_saturation_grace_requires_extra_failures_before_preauth_throttle() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 18, 0, 7));
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 18, 0, 7));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||||
auth_probe_record_failure(ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||||
}
|
}
|
||||||
auth_probe_note_saturation(now);
|
auth_probe_note_saturation_in(shared.as_ref(), now);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
!auth_probe_should_apply_preauth_throttle(ip, now),
|
!auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), ip, now),
|
||||||
"during global saturation, peer must receive configured grace window"
|
"during global saturation, peer must receive configured grace window"
|
||||||
);
|
);
|
||||||
|
|
||||||
for _ in 0..AUTH_PROBE_SATURATION_GRACE_FAILS {
|
for _ in 0..AUTH_PROBE_SATURATION_GRACE_FAILS {
|
||||||
auth_probe_record_failure(ip, now + Duration::from_millis(1));
|
auth_probe_record_failure_in(shared.as_ref(), ip, now + Duration::from_millis(1));
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
auth_probe_should_apply_preauth_throttle(ip, now + Duration::from_millis(1)),
|
auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), ip, now + Duration::from_millis(1)),
|
||||||
"after grace failures are exhausted, preauth throttle must activate"
|
"after grace failures are exhausted, preauth throttle must activate"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn integration_over_cap_insertion_keeps_probe_map_bounded() {
|
fn integration_over_cap_insertion_keeps_probe_map_bounded() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
for idx in 0..(AUTH_PROBE_TRACK_MAX_ENTRIES + 1024) {
|
for idx in 0..(AUTH_PROBE_TRACK_MAX_ENTRIES + 1024) {
|
||||||
|
|
@ -111,10 +105,10 @@ fn integration_over_cap_insertion_keeps_probe_map_bounded() {
|
||||||
((idx / 256) % 256) as u8,
|
((idx / 256) % 256) as u8,
|
||||||
(idx % 256) as u8,
|
(idx % 256) as u8,
|
||||||
));
|
));
|
||||||
auth_probe_record_failure(ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||||
}
|
}
|
||||||
|
|
||||||
let tracked = auth_probe_state_map().len();
|
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
|
||||||
assert!(
|
assert!(
|
||||||
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||||
"probe map must remain hard bounded under insertion storm"
|
"probe map must remain hard bounded under insertion storm"
|
||||||
|
|
@ -123,8 +117,8 @@ fn integration_over_cap_insertion_keeps_probe_map_bounded() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
|
fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let mut seed = 0x4D53_5854_6F66_6175u64;
|
let mut seed = 0x4D53_5854_6F66_6175u64;
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
@ -140,10 +134,10 @@ fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
|
||||||
(seed >> 8) as u8,
|
(seed >> 8) as u8,
|
||||||
seed as u8,
|
seed as u8,
|
||||||
));
|
));
|
||||||
auth_probe_record_failure(ip, now + Duration::from_millis((seed & 0x3f) as u64));
|
auth_probe_record_failure_in(shared.as_ref(), ip, now + Duration::from_millis((seed & 0x3f) as u64));
|
||||||
}
|
}
|
||||||
|
|
||||||
let state = auth_probe_state_map();
|
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
assert!(state.len() <= AUTH_PROBE_TRACK_MAX_ENTRIES);
|
assert!(state.len() <= AUTH_PROBE_TRACK_MAX_ENTRIES);
|
||||||
for entry in state.iter() {
|
for entry in state.iter() {
|
||||||
assert!(entry.value().fail_streak > 0);
|
assert!(entry.value().fail_streak > 0);
|
||||||
|
|
@ -152,13 +146,14 @@ fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
|
async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
let mut tasks = Vec::new();
|
let mut tasks = Vec::new();
|
||||||
|
|
||||||
for worker in 0..8u8 {
|
for worker in 0..8u8 {
|
||||||
|
let shared = shared.clone();
|
||||||
tasks.push(tokio::spawn(async move {
|
tasks.push(tokio::spawn(async move {
|
||||||
for i in 0..4096u32 {
|
for i in 0..4096u32 {
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||||
|
|
@ -167,7 +162,7 @@ async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
|
||||||
((i >> 8) & 0xff) as u8,
|
((i >> 8) & 0xff) as u8,
|
||||||
(i & 0xff) as u8,
|
(i & 0xff) as u8,
|
||||||
));
|
));
|
||||||
auth_probe_record_failure(ip, start + Duration::from_millis((i % 4) as u64));
|
auth_probe_record_failure_in(shared.as_ref(), ip, start + Duration::from_millis((i % 4) as u64));
|
||||||
}
|
}
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
@ -176,12 +171,12 @@ async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
|
||||||
task.await.expect("stress worker must not panic");
|
task.await.expect("stress worker must not panic");
|
||||||
}
|
}
|
||||||
|
|
||||||
let tracked = auth_probe_state_map().len();
|
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
|
||||||
assert!(
|
assert!(
|
||||||
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||||
"parallel failure flood must not exceed cap"
|
"parallel failure flood must not exceed cap"
|
||||||
);
|
);
|
||||||
|
|
||||||
let probe = IpAddr::V4(Ipv4Addr::new(172, 3, 4, 5));
|
let probe = IpAddr::V4(Ipv4Addr::new(172, 3, 4, 5));
|
||||||
let _ = auth_probe_is_throttled(probe, start + Duration::from_millis(2));
|
let _ = auth_probe_is_throttled_in(shared.as_ref(), probe, start + Duration::from_millis(2));
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -2,20 +2,14 @@ use super::*;
|
||||||
use std::net::{IpAddr, Ipv4Addr};
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn edge_zero_state_len_yields_zero_start_offset() {
|
fn edge_zero_state_len_yields_zero_start_offset() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44));
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
auth_probe_scan_start_offset(ip, now, 0, 16),
|
auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 0, 16),
|
||||||
0,
|
0,
|
||||||
"empty map must not produce non-zero scan offset"
|
"empty map must not produce non-zero scan offset"
|
||||||
);
|
);
|
||||||
|
|
@ -23,7 +17,7 @@ fn edge_zero_state_len_yields_zero_start_offset() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window() {
|
fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
let scan_limit = 16usize;
|
let scan_limit = 16usize;
|
||||||
let state_len = 65_536usize;
|
let state_len = 65_536usize;
|
||||||
|
|
@ -37,7 +31,7 @@ fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window()
|
||||||
(i & 0xff) as u8,
|
(i & 0xff) as u8,
|
||||||
));
|
));
|
||||||
let now = base + Duration::from_micros(i as u64);
|
let now = base + Duration::from_micros(i as u64);
|
||||||
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
|
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||||
assert!(
|
assert!(
|
||||||
start < state_len,
|
start < state_len,
|
||||||
"start offset must stay within state length; start={start}, len={state_len}"
|
"start offset must stay within state length; start={start}, len={state_len}"
|
||||||
|
|
@ -56,12 +50,12 @@ fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window()
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn positive_state_smaller_than_scan_limit_caps_to_state_len() {
|
fn positive_state_smaller_than_scan_limit_caps_to_state_len() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 17));
|
let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 17));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
for state_len in 1..32usize {
|
for state_len in 1..32usize {
|
||||||
let start = auth_probe_scan_start_offset(ip, now, state_len, 64);
|
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, 64);
|
||||||
assert!(
|
assert!(
|
||||||
start < state_len,
|
start < state_len,
|
||||||
"start offset must never exceed state length when scan limit is larger"
|
"start offset must never exceed state length when scan limit is larger"
|
||||||
|
|
@ -71,7 +65,7 @@ fn positive_state_smaller_than_scan_limit_caps_to_state_len() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn light_fuzz_scan_offset_budget_never_exceeds_effective_window() {
|
fn light_fuzz_scan_offset_budget_never_exceeds_effective_window() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let mut seed = 0x5A41_5356_4C32_3236u64;
|
let mut seed = 0x5A41_5356_4C32_3236u64;
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
|
|
||||||
|
|
@ -89,7 +83,7 @@ fn light_fuzz_scan_offset_budget_never_exceeds_effective_window() {
|
||||||
let state_len = ((seed >> 8) as usize % 131_072).saturating_add(1);
|
let state_len = ((seed >> 8) as usize % 131_072).saturating_add(1);
|
||||||
let scan_limit = ((seed >> 32) as usize % 512).saturating_add(1);
|
let scan_limit = ((seed >> 32) as usize % 512).saturating_add(1);
|
||||||
let now = base + Duration::from_nanos(seed & 0xffff);
|
let now = base + Duration::from_nanos(seed & 0xffff);
|
||||||
let start = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
|
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
start < state_len,
|
start < state_len,
|
||||||
|
|
|
||||||
|
|
@ -3,22 +3,16 @@ use std::collections::HashSet;
|
||||||
use std::net::{IpAddr, Ipv4Addr};
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn positive_same_ip_moving_time_yields_diverse_scan_offsets() {
|
fn positive_same_ip_moving_time_yields_diverse_scan_offsets() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 77));
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 77));
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
let mut uniq = HashSet::new();
|
let mut uniq = HashSet::new();
|
||||||
|
|
||||||
for i in 0..512u64 {
|
for i in 0..512u64 {
|
||||||
let now = base + Duration::from_nanos(i);
|
let now = base + Duration::from_nanos(i);
|
||||||
let offset = auth_probe_scan_start_offset(ip, now, 65_536, 16);
|
let offset = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 65_536, 16);
|
||||||
uniq.insert(offset);
|
uniq.insert(offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -31,7 +25,7 @@ fn positive_same_ip_moving_time_yields_diverse_scan_offsets() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() {
|
fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
let mut uniq = HashSet::new();
|
let mut uniq = HashSet::new();
|
||||||
|
|
||||||
|
|
@ -42,7 +36,7 @@ fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() {
|
||||||
i as u8,
|
i as u8,
|
||||||
(255 - (i as u8)),
|
(255 - (i as u8)),
|
||||||
));
|
));
|
||||||
uniq.insert(auth_probe_scan_start_offset(ip, now, 65_536, 16));
|
uniq.insert(auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 65_536, 16));
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
|
|
@ -54,12 +48,13 @@ fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() {
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live() {
|
async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
let mut workers = Vec::new();
|
let mut workers = Vec::new();
|
||||||
for worker in 0..8u8 {
|
for worker in 0..8u8 {
|
||||||
|
let shared = shared.clone();
|
||||||
workers.push(tokio::spawn(async move {
|
workers.push(tokio::spawn(async move {
|
||||||
for i in 0..8192u32 {
|
for i in 0..8192u32 {
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||||
|
|
@ -68,7 +63,7 @@ async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live(
|
||||||
((i >> 8) & 0xff) as u8,
|
((i >> 8) & 0xff) as u8,
|
||||||
(i & 0xff) as u8,
|
(i & 0xff) as u8,
|
||||||
));
|
));
|
||||||
auth_probe_record_failure(ip, start + Duration::from_micros((i % 128) as u64));
|
auth_probe_record_failure_in(shared.as_ref(), ip, start + Duration::from_micros((i % 128) as u64));
|
||||||
}
|
}
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
@ -78,17 +73,17 @@ async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live(
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
auth_probe_state_map().len() <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
auth_probe_state_for_testing_in_shared(shared.as_ref()).len() <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||||
"state must remain hard-capped under parallel saturation churn"
|
"state must remain hard-capped under parallel saturation churn"
|
||||||
);
|
);
|
||||||
|
|
||||||
let probe = IpAddr::V4(Ipv4Addr::new(10, 4, 1, 1));
|
let probe = IpAddr::V4(Ipv4Addr::new(10, 4, 1, 1));
|
||||||
let _ = auth_probe_should_apply_preauth_throttle(probe, start + Duration::from_millis(1));
|
let _ = auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), probe, start + Duration::from_millis(1));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn light_fuzz_scan_offset_stays_within_window_for_randomized_inputs() {
|
fn light_fuzz_scan_offset_stays_within_window_for_randomized_inputs() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let mut seed = 0xA55A_1357_2468_9BDFu64;
|
let mut seed = 0xA55A_1357_2468_9BDFu64;
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
|
|
||||||
|
|
@ -107,7 +102,7 @@ fn light_fuzz_scan_offset_stays_within_window_for_randomized_inputs() {
|
||||||
let scan_limit = ((seed >> 40) as usize % 1024).saturating_add(1);
|
let scan_limit = ((seed >> 40) as usize % 1024).saturating_add(1);
|
||||||
let now = base + Duration::from_nanos(seed & 0x1fff);
|
let now = base + Duration::from_nanos(seed & 0x1fff);
|
||||||
|
|
||||||
let offset = auth_probe_scan_start_offset(ip, now, state_len, scan_limit);
|
let offset = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||||
assert!(
|
assert!(
|
||||||
offset < state_len,
|
offset < state_len,
|
||||||
"scan offset must always remain inside state length"
|
"scan offset must always remain inside state length"
|
||||||
|
|
|
||||||
|
|
@ -36,16 +36,10 @@ fn make_valid_tls_handshake(secret: &[u8], timestamp: u32) -> Vec<u8> {
|
||||||
handshake
|
handshake
|
||||||
}
|
}
|
||||||
|
|
||||||
fn test_lock_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn handshake_baseline_probe_always_falls_back_to_masking() {
|
async fn handshake_baseline_probe_always_falls_back_to_masking() {
|
||||||
let _guard = test_lock_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let cfg = test_config_with_secret_hex("11111111111111111111111111111111");
|
let cfg = test_config_with_secret_hex("11111111111111111111111111111111");
|
||||||
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
||||||
|
|
@ -70,8 +64,8 @@ async fn handshake_baseline_probe_always_falls_back_to_masking() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn handshake_baseline_invalid_secret_triggers_fallback_not_error_response() {
|
async fn handshake_baseline_invalid_secret_triggers_fallback_not_error_response() {
|
||||||
let _guard = test_lock_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let good_secret = [0x22u8; 16];
|
let good_secret = [0x22u8; 16];
|
||||||
let bad_cfg = test_config_with_secret_hex("33333333333333333333333333333333");
|
let bad_cfg = test_config_with_secret_hex("33333333333333333333333333333333");
|
||||||
|
|
@ -97,8 +91,8 @@ async fn handshake_baseline_invalid_secret_triggers_fallback_not_error_response(
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn handshake_baseline_auth_probe_streak_increments_per_ip() {
|
async fn handshake_baseline_auth_probe_streak_increments_per_ip() {
|
||||||
let _guard = test_lock_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let cfg = test_config_with_secret_hex("44444444444444444444444444444444");
|
let cfg = test_config_with_secret_hex("44444444444444444444444444444444");
|
||||||
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
||||||
|
|
@ -109,7 +103,7 @@ async fn handshake_baseline_auth_probe_streak_increments_per_ip() {
|
||||||
let bad_probe = b"\x16\x03\x01\x00";
|
let bad_probe = b"\x16\x03\x01\x00";
|
||||||
|
|
||||||
for expected in 1..=3 {
|
for expected in 1..=3 {
|
||||||
let res = handle_tls_handshake(
|
let res = handle_tls_handshake_with_shared(
|
||||||
bad_probe,
|
bad_probe,
|
||||||
tokio::io::empty(),
|
tokio::io::empty(),
|
||||||
tokio::io::sink(),
|
tokio::io::sink(),
|
||||||
|
|
@ -118,43 +112,44 @@ async fn handshake_baseline_auth_probe_streak_increments_per_ip() {
|
||||||
&replay_checker,
|
&replay_checker,
|
||||||
&rng,
|
&rng,
|
||||||
None,
|
None,
|
||||||
|
shared.as_ref(),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
assert!(matches!(res, HandshakeResult::BadClient { .. }));
|
assert!(matches!(res, HandshakeResult::BadClient { .. }));
|
||||||
assert_eq!(auth_probe_fail_streak_for_testing(peer.ip()), Some(expected));
|
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()), Some(expected));
|
||||||
assert_eq!(auth_probe_fail_streak_for_testing(untouched_ip), None);
|
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), untouched_ip), None);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn handshake_baseline_saturation_fires_at_compile_time_threshold() {
|
fn handshake_baseline_saturation_fires_at_compile_time_threshold() {
|
||||||
let _guard = test_lock_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 33));
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 33));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS.saturating_sub(1) {
|
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS.saturating_sub(1) {
|
||||||
auth_probe_record_failure(ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||||
}
|
}
|
||||||
assert!(!auth_probe_is_throttled(ip, now));
|
assert!(!auth_probe_is_throttled_in(shared.as_ref(), ip, now));
|
||||||
|
|
||||||
auth_probe_record_failure(ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||||
assert!(auth_probe_is_throttled(ip, now));
|
assert!(auth_probe_is_throttled_in(shared.as_ref(), ip, now));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn handshake_baseline_repeated_probes_streak_monotonic() {
|
fn handshake_baseline_repeated_probes_streak_monotonic() {
|
||||||
let _guard = test_lock_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 42));
|
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 42));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
let mut prev = 0u32;
|
let mut prev = 0u32;
|
||||||
|
|
||||||
for _ in 0..100 {
|
for _ in 0..100 {
|
||||||
auth_probe_record_failure(ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||||
let current = auth_probe_fail_streak_for_testing(ip).unwrap_or(0);
|
let current = auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip).unwrap_or(0);
|
||||||
assert!(current >= prev, "streak must be monotonic");
|
assert!(current >= prev, "streak must be monotonic");
|
||||||
prev = current;
|
prev = current;
|
||||||
}
|
}
|
||||||
|
|
@ -162,14 +157,14 @@ fn handshake_baseline_repeated_probes_streak_monotonic() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn handshake_baseline_throttled_ip_incurs_backoff_delay() {
|
fn handshake_baseline_throttled_ip_incurs_backoff_delay() {
|
||||||
let _guard = test_lock_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44));
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||||
auth_probe_record_failure(ip, now);
|
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||||
}
|
}
|
||||||
|
|
||||||
let delay = auth_probe_backoff(AUTH_PROBE_BACKOFF_START_FAILS);
|
let delay = auth_probe_backoff(AUTH_PROBE_BACKOFF_START_FAILS);
|
||||||
|
|
@ -178,14 +173,14 @@ fn handshake_baseline_throttled_ip_incurs_backoff_delay() {
|
||||||
let before_expiry = now + delay.saturating_sub(Duration::from_millis(1));
|
let before_expiry = now + delay.saturating_sub(Duration::from_millis(1));
|
||||||
let after_expiry = now + delay + Duration::from_millis(1);
|
let after_expiry = now + delay + Duration::from_millis(1);
|
||||||
|
|
||||||
assert!(auth_probe_is_throttled(ip, before_expiry));
|
assert!(auth_probe_is_throttled_in(shared.as_ref(), ip, before_expiry));
|
||||||
assert!(!auth_probe_is_throttled(ip, after_expiry));
|
assert!(!auth_probe_is_throttled_in(shared.as_ref(), ip, after_expiry));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn handshake_baseline_malformed_probe_frames_fail_closed_to_masking() {
|
async fn handshake_baseline_malformed_probe_frames_fail_closed_to_masking() {
|
||||||
let _guard = test_lock_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let cfg = test_config_with_secret_hex("55555555555555555555555555555555");
|
let cfg = test_config_with_secret_hex("55555555555555555555555555555555");
|
||||||
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
||||||
|
|
|
||||||
|
|
@ -67,16 +67,10 @@ fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
||||||
cfg
|
cfg
|
||||||
}
|
}
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() {
|
async fn mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "11223344556677889900aabbccddeeff";
|
let secret_hex = "11223344556677889900aabbccddeeff";
|
||||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||||
|
|
@ -110,13 +104,13 @@ async fn mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() {
|
||||||
.await;
|
.await;
|
||||||
assert!(matches!(second, HandshakeResult::BadClient { .. }));
|
assert!(matches!(second, HandshakeResult::BadClient { .. }));
|
||||||
|
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() {
|
async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "00112233445566778899aabbccddeeff";
|
let secret_hex = "00112233445566778899aabbccddeeff";
|
||||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
|
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
|
||||||
|
|
@ -178,13 +172,13 @@ async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_rejected() {
|
async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_rejected() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "99887766554433221100ffeeddccbbaa";
|
let secret_hex = "99887766554433221100ffeeddccbbaa";
|
||||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 4);
|
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 4);
|
||||||
|
|
@ -274,5 +268,5 @@ async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_re
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -11,12 +11,6 @@ use tokio::sync::Barrier;
|
||||||
|
|
||||||
// --- Helpers ---
|
// --- Helpers ---
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
||||||
let mut cfg = ProxyConfig::default();
|
let mut cfg = ProxyConfig::default();
|
||||||
cfg.access.users.clear();
|
cfg.access.users.clear();
|
||||||
|
|
@ -164,8 +158,8 @@ fn make_valid_tls_client_hello_with_sni_and_alpn(
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn server_hello_delay_bypassed_if_max_is_zero_despite_high_min() {
|
async fn server_hello_delay_bypassed_if_max_is_zero_despite_high_min() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x1Au8; 16];
|
let secret = [0x1Au8; 16];
|
||||||
let mut config = test_config_with_secret_hex("1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a");
|
let mut config = test_config_with_secret_hex("1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a");
|
||||||
|
|
@ -201,10 +195,10 @@ async fn server_hello_delay_bypassed_if_max_is_zero_despite_high_min() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn auth_probe_backoff_extreme_fail_streak_clamps_safely() {
|
fn auth_probe_backoff_extreme_fail_streak_clamps_safely() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let state = auth_probe_state_map();
|
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 99));
|
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 99));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
|
|
@ -217,7 +211,7 @@ fn auth_probe_backoff_extreme_fail_streak_clamps_safely() {
|
||||||
},
|
},
|
||||||
);
|
);
|
||||||
|
|
||||||
auth_probe_record_failure_with_state(&state, peer_ip, now);
|
auth_probe_record_failure_with_state_in(shared.as_ref(), &state, peer_ip, now);
|
||||||
|
|
||||||
let updated = state.get(&peer_ip).unwrap();
|
let updated = state.get(&peer_ip).unwrap();
|
||||||
assert_eq!(updated.fail_streak, u32::MAX);
|
assert_eq!(updated.fail_streak, u32::MAX);
|
||||||
|
|
@ -270,8 +264,8 @@ fn generate_tg_nonce_cryptographic_uniqueness_and_entropy() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_multi_user_decryption_isolation() {
|
async fn mtproto_multi_user_decryption_isolation() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let mut config = ProxyConfig::default();
|
let mut config = ProxyConfig::default();
|
||||||
config.general.modes.secure = true;
|
config.general.modes.secure = true;
|
||||||
|
|
@ -323,10 +317,8 @@ async fn mtproto_multi_user_decryption_isolation() {
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn invalid_secret_warning_lock_contention_and_bound() {
|
async fn invalid_secret_warning_lock_contention_and_bound() {
|
||||||
let _guard = warned_secrets_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_warned_secrets_for_testing_in_shared(shared.as_ref());
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
|
||||||
clear_warned_secrets_for_testing();
|
|
||||||
|
|
||||||
let tasks = 50;
|
let tasks = 50;
|
||||||
let iterations_per_task = 100;
|
let iterations_per_task = 100;
|
||||||
|
|
@ -335,11 +327,12 @@ async fn invalid_secret_warning_lock_contention_and_bound() {
|
||||||
|
|
||||||
for t in 0..tasks {
|
for t in 0..tasks {
|
||||||
let b = barrier.clone();
|
let b = barrier.clone();
|
||||||
|
let shared = shared.clone();
|
||||||
handles.push(tokio::spawn(async move {
|
handles.push(tokio::spawn(async move {
|
||||||
b.wait().await;
|
b.wait().await;
|
||||||
for i in 0..iterations_per_task {
|
for i in 0..iterations_per_task {
|
||||||
let user_name = format!("contention_user_{}_{}", t, i);
|
let user_name = format!("contention_user_{}_{}", t, i);
|
||||||
warn_invalid_secret_once(&user_name, "invalid_hex", ACCESS_SECRET_BYTES, None);
|
warn_invalid_secret_once_in(shared.as_ref(), &user_name, "invalid_hex", ACCESS_SECRET_BYTES, None);
|
||||||
}
|
}
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
@ -348,7 +341,7 @@ async fn invalid_secret_warning_lock_contention_and_bound() {
|
||||||
handle.await.unwrap();
|
handle.await.unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
let warned = INVALID_SECRET_WARNED.get().unwrap();
|
let warned = warned_secrets_for_testing_in_shared(shared.as_ref());
|
||||||
let guard = warned
|
let guard = warned
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
|
@ -362,8 +355,8 @@ async fn invalid_secret_warning_lock_contention_and_bound() {
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn mtproto_strict_concurrent_replay_race_condition() {
|
async fn mtproto_strict_concurrent_replay_race_condition() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret_hex = "4A4A4A4A4A4A4A4A4A4A4A4A4A4A4A4A";
|
let secret_hex = "4A4A4A4A4A4A4A4A4A4A4A4A4A4A4A4A";
|
||||||
let config = Arc::new(test_config_with_secret_hex(secret_hex));
|
let config = Arc::new(test_config_with_secret_hex(secret_hex));
|
||||||
|
|
@ -428,8 +421,8 @@ async fn mtproto_strict_concurrent_replay_race_condition() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn tls_alpn_zero_length_protocol_handled_safely() {
|
async fn tls_alpn_zero_length_protocol_handled_safely() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x5Bu8; 16];
|
let secret = [0x5Bu8; 16];
|
||||||
let mut config = test_config_with_secret_hex("5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b");
|
let mut config = test_config_with_secret_hex("5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b");
|
||||||
|
|
@ -461,8 +454,8 @@ async fn tls_alpn_zero_length_protocol_handled_safely() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn tls_sni_massive_hostname_does_not_panic() {
|
async fn tls_sni_massive_hostname_does_not_panic() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x6Cu8; 16];
|
let secret = [0x6Cu8; 16];
|
||||||
let config = test_config_with_secret_hex("6c6c6c6c6c6c6c6c6c6c6c6c6c6c6c6c");
|
let config = test_config_with_secret_hex("6c6c6c6c6c6c6c6c6c6c6c6c6c6c6c6c");
|
||||||
|
|
@ -497,8 +490,8 @@ async fn tls_sni_massive_hostname_does_not_panic() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn tls_progressive_truncation_fuzzing_no_panics() {
|
async fn tls_progressive_truncation_fuzzing_no_panics() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x7Du8; 16];
|
let secret = [0x7Du8; 16];
|
||||||
let config = test_config_with_secret_hex("7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d");
|
let config = test_config_with_secret_hex("7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d");
|
||||||
|
|
@ -535,8 +528,8 @@ async fn tls_progressive_truncation_fuzzing_no_panics() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_pure_entropy_fuzzing_no_panics() {
|
async fn mtproto_pure_entropy_fuzzing_no_panics() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let config = test_config_with_secret_hex("8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e");
|
let config = test_config_with_secret_hex("8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e8e");
|
||||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||||
|
|
@ -569,10 +562,8 @@ async fn mtproto_pure_entropy_fuzzing_no_panics() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn decode_user_secret_odd_length_hex_rejection() {
|
fn decode_user_secret_odd_length_hex_rejection() {
|
||||||
let _guard = warned_secrets_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_warned_secrets_for_testing_in_shared(shared.as_ref());
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
|
||||||
clear_warned_secrets_for_testing();
|
|
||||||
|
|
||||||
let mut config = ProxyConfig::default();
|
let mut config = ProxyConfig::default();
|
||||||
config.access.users.clear();
|
config.access.users.clear();
|
||||||
|
|
@ -581,7 +572,7 @@ fn decode_user_secret_odd_length_hex_rejection() {
|
||||||
"1234567890123456789012345678901".to_string(),
|
"1234567890123456789012345678901".to_string(),
|
||||||
);
|
);
|
||||||
|
|
||||||
let decoded = decode_user_secrets(&config, None);
|
let decoded = decode_user_secrets_in(shared.as_ref(), &config, None);
|
||||||
assert!(
|
assert!(
|
||||||
decoded.is_empty(),
|
decoded.is_empty(),
|
||||||
"Odd-length hex string must be gracefully rejected by hex::decode without unwrapping"
|
"Odd-length hex string must be gracefully rejected by hex::decode without unwrapping"
|
||||||
|
|
@ -590,10 +581,10 @@ fn decode_user_secret_odd_length_hex_rejection() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() {
|
fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let state = auth_probe_state_map();
|
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 112));
|
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 112));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
|
|
@ -608,7 +599,7 @@ fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() {
|
||||||
);
|
);
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut guard = auth_probe_saturation_state_lock();
|
let mut guard = auth_probe_saturation_state_lock_for_testing_in_shared(shared.as_ref());
|
||||||
*guard = Some(AuthProbeSaturationState {
|
*guard = Some(AuthProbeSaturationState {
|
||||||
fail_streak: AUTH_PROBE_BACKOFF_START_FAILS,
|
fail_streak: AUTH_PROBE_BACKOFF_START_FAILS,
|
||||||
blocked_until: now + Duration::from_secs(5),
|
blocked_until: now + Duration::from_secs(5),
|
||||||
|
|
@ -616,7 +607,7 @@ fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
let is_throttled = auth_probe_should_apply_preauth_throttle(peer_ip, now);
|
let is_throttled = auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), peer_ip, now);
|
||||||
assert!(
|
assert!(
|
||||||
is_throttled,
|
is_throttled,
|
||||||
"A peer with a pre-existing high fail streak must be immediately throttled when saturation begins, receiving no unearned grace period"
|
"A peer with a pre-existing high fail streak must be immediately throttled when saturation begins, receiving no unearned grace period"
|
||||||
|
|
@ -625,21 +616,21 @@ fn saturation_grace_pre_existing_high_fail_streak_immediate_throttle() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn auth_probe_saturation_note_resets_retention_window() {
|
fn auth_probe_saturation_note_resets_retention_window() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let base_time = Instant::now();
|
let base_time = Instant::now();
|
||||||
|
|
||||||
auth_probe_note_saturation(base_time);
|
auth_probe_note_saturation_in(shared.as_ref(), base_time);
|
||||||
let later = base_time + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS - 1);
|
let later = base_time + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS - 1);
|
||||||
auth_probe_note_saturation(later);
|
auth_probe_note_saturation_in(shared.as_ref(), later);
|
||||||
|
|
||||||
let check_time = base_time + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 5);
|
let check_time = base_time + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 5);
|
||||||
|
|
||||||
// This call may return false if backoff has elapsed, but it must not clear
|
// This call may return false if backoff has elapsed, but it must not clear
|
||||||
// the saturation state because `later` refreshed last_seen.
|
// the saturation state because `later` refreshed last_seen.
|
||||||
let _ = auth_probe_saturation_is_throttled_at_for_testing(check_time);
|
let _ = auth_probe_saturation_is_throttled_at_for_testing_in_shared(shared.as_ref(), check_time);
|
||||||
let guard = auth_probe_saturation_state_lock();
|
let guard = auth_probe_saturation_state_lock_for_testing_in_shared(shared.as_ref());
|
||||||
assert!(
|
assert!(
|
||||||
guard.is_some(),
|
guard.is_some(),
|
||||||
"Ongoing saturation notes must refresh last_seen so saturation state remains retained past the original window"
|
"Ongoing saturation notes must refresh last_seen so saturation state remains retained past the original window"
|
||||||
|
|
|
||||||
|
|
@ -6,12 +6,6 @@ use std::sync::Arc;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
use tokio::sync::Barrier;
|
use tokio::sync::Barrier;
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
||||||
let mut cfg = ProxyConfig::default();
|
let mut cfg = ProxyConfig::default();
|
||||||
cfg.access.users.clear();
|
cfg.access.users.clear();
|
||||||
|
|
@ -127,8 +121,8 @@ fn make_valid_mtproto_handshake(
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn tls_alpn_reject_does_not_pollute_replay_cache() {
|
async fn tls_alpn_reject_does_not_pollute_replay_cache() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let secret = [0x11u8; 16];
|
let secret = [0x11u8; 16];
|
||||||
let mut config = test_config_with_secret_hex("11111111111111111111111111111111");
|
let mut config = test_config_with_secret_hex("11111111111111111111111111111111");
|
||||||
|
|
@ -164,8 +158,8 @@ async fn tls_alpn_reject_does_not_pollute_replay_cache() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn tls_truncated_session_id_len_fails_closed_without_panic() {
|
async fn tls_truncated_session_id_len_fails_closed_without_panic() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let config = test_config_with_secret_hex("33333333333333333333333333333333");
|
let config = test_config_with_secret_hex("33333333333333333333333333333333");
|
||||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||||
|
|
@ -193,10 +187,10 @@ async fn tls_truncated_session_id_len_fails_closed_without_panic() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() {
|
fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let state = auth_probe_state_map();
|
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
let same = Instant::now();
|
let same = Instant::now();
|
||||||
|
|
||||||
for i in 0..AUTH_PROBE_TRACK_MAX_ENTRIES {
|
for i in 0..AUTH_PROBE_TRACK_MAX_ENTRIES {
|
||||||
|
|
@ -212,7 +206,7 @@ fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() {
|
||||||
}
|
}
|
||||||
|
|
||||||
let new_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 21, 21));
|
let new_ip = IpAddr::V4(Ipv4Addr::new(192, 168, 21, 21));
|
||||||
auth_probe_record_failure_with_state(state, new_ip, same + Duration::from_millis(1));
|
auth_probe_record_failure_with_state_in(shared.as_ref(), state, new_ip, same + Duration::from_millis(1));
|
||||||
|
|
||||||
assert_eq!(state.len(), AUTH_PROBE_TRACK_MAX_ENTRIES);
|
assert_eq!(state.len(), AUTH_PROBE_TRACK_MAX_ENTRIES);
|
||||||
assert!(state.contains_key(&new_ip));
|
assert!(state.contains_key(&new_ip));
|
||||||
|
|
@ -220,21 +214,21 @@ fn auth_probe_eviction_identical_timestamps_keeps_map_bounded() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn clear_auth_probe_state_recovers_from_poisoned_saturation_lock() {
|
fn clear_auth_probe_state_recovers_from_poisoned_saturation_lock() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let saturation = auth_probe_saturation_state();
|
let shared_for_poison = shared.clone();
|
||||||
let poison_thread = std::thread::spawn(move || {
|
let poison_thread = std::thread::spawn(move || {
|
||||||
let _hold = saturation
|
let _hold = auth_probe_saturation_state_for_testing_in_shared(shared_for_poison.as_ref())
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
panic!("intentional poison for regression coverage");
|
panic!("intentional poison for regression coverage");
|
||||||
});
|
});
|
||||||
let _ = poison_thread.join();
|
let _ = poison_thread.join();
|
||||||
|
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let guard = auth_probe_saturation_state()
|
let guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref())
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
assert!(guard.is_none());
|
assert!(guard.is_none());
|
||||||
|
|
@ -242,12 +236,9 @@ fn clear_auth_probe_state_recovers_from_poisoned_saturation_lock() {
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn mtproto_invalid_length_secret_is_ignored_and_valid_user_still_auths() {
|
async fn mtproto_invalid_length_secret_is_ignored_and_valid_user_still_auths() {
|
||||||
let _probe_guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
let _warn_guard = warned_secrets_test_lock()
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
.lock()
|
clear_warned_secrets_for_testing_in_shared(shared.as_ref());
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
|
||||||
clear_auth_probe_state_for_testing();
|
|
||||||
clear_warned_secrets_for_testing();
|
|
||||||
|
|
||||||
let mut config = ProxyConfig::default();
|
let mut config = ProxyConfig::default();
|
||||||
config.general.modes.secure = true;
|
config.general.modes.secure = true;
|
||||||
|
|
@ -285,14 +276,14 @@ async fn mtproto_invalid_length_secret_is_ignored_and_valid_user_still_auths() {
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() {
|
async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 80));
|
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 80));
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut guard = auth_probe_saturation_state()
|
let mut guard = auth_probe_saturation_state_for_testing_in_shared(shared.as_ref())
|
||||||
.lock()
|
.lock()
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
*guard = Some(AuthProbeSaturationState {
|
*guard = Some(AuthProbeSaturationState {
|
||||||
|
|
@ -302,7 +293,7 @@ async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
let state = auth_probe_state_map();
|
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
state.insert(
|
state.insert(
|
||||||
peer_ip,
|
peer_ip,
|
||||||
AuthProbeState {
|
AuthProbeState {
|
||||||
|
|
@ -318,9 +309,10 @@ async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() {
|
||||||
|
|
||||||
for _ in 0..tasks {
|
for _ in 0..tasks {
|
||||||
let b = barrier.clone();
|
let b = barrier.clone();
|
||||||
|
let shared = shared.clone();
|
||||||
handles.push(tokio::spawn(async move {
|
handles.push(tokio::spawn(async move {
|
||||||
b.wait().await;
|
b.wait().await;
|
||||||
auth_probe_record_failure(peer_ip, Instant::now());
|
auth_probe_record_failure_in(shared.as_ref(), peer_ip, Instant::now());
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -333,7 +325,7 @@ async fn saturation_grace_exhaustion_under_concurrency_keeps_peer_throttled() {
|
||||||
final_state.fail_streak
|
final_state.fail_streak
|
||||||
>= AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS
|
>= AUTH_PROBE_BACKOFF_START_FAILS + AUTH_PROBE_SATURATION_GRACE_FAILS
|
||||||
);
|
);
|
||||||
assert!(auth_probe_should_apply_preauth_throttle(
|
assert!(auth_probe_should_apply_preauth_throttle_in(shared.as_ref(),
|
||||||
peer_ip,
|
peer_ip,
|
||||||
Instant::now()
|
Instant::now()
|
||||||
));
|
));
|
||||||
|
|
|
||||||
|
|
@ -1,46 +1,39 @@
|
||||||
use super::*;
|
use super::*;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
fn poison_saturation_mutex(shared: &ProxySharedState) {
|
||||||
auth_probe_test_lock()
|
let saturation = auth_probe_saturation_state_for_testing_in_shared(shared);
|
||||||
.lock()
|
let _ = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn poison_saturation_mutex() {
|
|
||||||
let saturation = auth_probe_saturation_state();
|
|
||||||
let poison_thread = std::thread::spawn(move || {
|
|
||||||
let _guard = saturation
|
let _guard = saturation
|
||||||
.lock()
|
.lock()
|
||||||
.expect("saturation mutex must be lockable for poison setup");
|
.expect("saturation mutex must be lockable for poison setup");
|
||||||
panic!("intentional poison for saturation mutex resilience test");
|
panic!("intentional poison for saturation mutex resilience test");
|
||||||
});
|
}));
|
||||||
let _ = poison_thread.join();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn auth_probe_saturation_note_recovers_after_mutex_poison() {
|
fn auth_probe_saturation_note_recovers_after_mutex_poison() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
poison_saturation_mutex();
|
poison_saturation_mutex(shared.as_ref());
|
||||||
|
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
auth_probe_note_saturation(now);
|
auth_probe_note_saturation_in(shared.as_ref(), now);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
auth_probe_saturation_is_throttled_at_for_testing(now),
|
auth_probe_saturation_is_throttled_at_for_testing_in_shared(shared.as_ref(), now),
|
||||||
"poisoned saturation mutex must not disable saturation throttling"
|
"poisoned saturation mutex must not disable saturation throttling"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn auth_probe_saturation_check_recovers_after_mutex_poison() {
|
fn auth_probe_saturation_check_recovers_after_mutex_poison() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
poison_saturation_mutex();
|
poison_saturation_mutex(shared.as_ref());
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut guard = auth_probe_saturation_state_lock();
|
let mut guard = auth_probe_saturation_state_lock_for_testing_in_shared(shared.as_ref());
|
||||||
*guard = Some(AuthProbeSaturationState {
|
*guard = Some(AuthProbeSaturationState {
|
||||||
fail_streak: AUTH_PROBE_BACKOFF_START_FAILS,
|
fail_streak: AUTH_PROBE_BACKOFF_START_FAILS,
|
||||||
blocked_until: Instant::now() + Duration::from_millis(10),
|
blocked_until: Instant::now() + Duration::from_millis(10),
|
||||||
|
|
@ -49,23 +42,23 @@ fn auth_probe_saturation_check_recovers_after_mutex_poison() {
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
auth_probe_saturation_is_throttled_for_testing(),
|
auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref()),
|
||||||
"throttle check must recover poisoned saturation mutex and stay fail-closed"
|
"throttle check must recover poisoned saturation mutex and stay fail-closed"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn clear_auth_probe_state_clears_saturation_even_if_poisoned() {
|
fn clear_auth_probe_state_clears_saturation_even_if_poisoned() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
poison_saturation_mutex();
|
poison_saturation_mutex(shared.as_ref());
|
||||||
|
|
||||||
auth_probe_note_saturation(Instant::now());
|
auth_probe_note_saturation_in(shared.as_ref(), Instant::now());
|
||||||
assert!(auth_probe_saturation_is_throttled_for_testing());
|
assert!(auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref()));
|
||||||
|
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
assert!(
|
assert!(
|
||||||
!auth_probe_saturation_is_throttled_for_testing(),
|
!auth_probe_saturation_is_throttled_for_testing_in_shared(shared.as_ref()),
|
||||||
"clear helper must clear saturation state even after poison"
|
"clear helper must clear saturation state even after poison"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -4,12 +4,6 @@ use crate::protocol::constants::{ProtoTag, TLS_RECORD_HANDSHAKE, TLS_VERSION};
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
|
||||||
auth_probe_test_lock()
|
|
||||||
.lock()
|
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn make_valid_mtproto_handshake(
|
fn make_valid_mtproto_handshake(
|
||||||
secret_hex: &str,
|
secret_hex: &str,
|
||||||
proto_tag: ProtoTag,
|
proto_tag: ProtoTag,
|
||||||
|
|
@ -149,8 +143,8 @@ fn median_ns(samples: &mut [u128]) -> u128 {
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "manual benchmark: timing-sensitive and host-dependent"]
|
#[ignore = "manual benchmark: timing-sensitive and host-dependent"]
|
||||||
async fn mtproto_user_scan_timing_manual_benchmark() {
|
async fn mtproto_user_scan_timing_manual_benchmark() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
clear_auth_probe_state_for_testing();
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
const DECOY_USERS: usize = 8_000;
|
const DECOY_USERS: usize = 8_000;
|
||||||
const ITERATIONS: usize = 250;
|
const ITERATIONS: usize = 250;
|
||||||
|
|
@ -243,7 +237,7 @@ async fn mtproto_user_scan_timing_manual_benchmark() {
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
#[ignore = "manual benchmark: timing-sensitive and host-dependent"]
|
#[ignore = "manual benchmark: timing-sensitive and host-dependent"]
|
||||||
async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() {
|
async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() {
|
||||||
let _guard = auth_probe_test_guard();
|
let shared = ProxySharedState::new();
|
||||||
|
|
||||||
const DECOY_USERS: usize = 8_000;
|
const DECOY_USERS: usize = 8_000;
|
||||||
const ITERATIONS: usize = 250;
|
const ITERATIONS: usize = 250;
|
||||||
|
|
@ -281,7 +275,7 @@ async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() {
|
||||||
let no_sni = make_valid_tls_handshake(&target_secret, (i as u32).wrapping_add(10_000));
|
let no_sni = make_valid_tls_handshake(&target_secret, (i as u32).wrapping_add(10_000));
|
||||||
|
|
||||||
let started_sni = Instant::now();
|
let started_sni = Instant::now();
|
||||||
let sni_secrets = decode_user_secrets(&config, Some(preferred_user));
|
let sni_secrets = decode_user_secrets_in(shared.as_ref(), &config, Some(preferred_user));
|
||||||
let sni_result = tls::validate_tls_handshake_with_replay_window(
|
let sni_result = tls::validate_tls_handshake_with_replay_window(
|
||||||
&with_sni,
|
&with_sni,
|
||||||
&sni_secrets,
|
&sni_secrets,
|
||||||
|
|
@ -292,7 +286,7 @@ async fn tls_sni_preferred_vs_no_sni_fallback_manual_benchmark() {
|
||||||
assert!(sni_result.is_some());
|
assert!(sni_result.is_some());
|
||||||
|
|
||||||
let started_no_sni = Instant::now();
|
let started_no_sni = Instant::now();
|
||||||
let no_sni_secrets = decode_user_secrets(&config, None);
|
let no_sni_secrets = decode_user_secrets_in(shared.as_ref(), &config, None);
|
||||||
let no_sni_result = tls::validate_tls_handshake_with_replay_window(
|
let no_sni_result = tls::validate_tls_handshake_with_replay_window(
|
||||||
&no_sni,
|
&no_sni,
|
||||||
&no_sni_secrets,
|
&no_sni_secrets,
|
||||||
|
|
|
||||||
|
|
@ -3,36 +3,39 @@ use std::time::{Duration, Instant};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn middle_relay_baseline_public_api_idle_roundtrip_contract() {
|
fn middle_relay_baseline_public_api_idle_roundtrip_contract() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
assert!(mark_relay_idle_candidate(7001));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 7001));
|
||||||
assert_eq!(oldest_relay_idle_candidate(), Some(7001));
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7001));
|
||||||
|
|
||||||
clear_relay_idle_candidate(7001);
|
clear_relay_idle_candidate_for_testing(shared.as_ref(), 7001);
|
||||||
assert_ne!(oldest_relay_idle_candidate(), Some(7001));
|
assert_ne!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7001));
|
||||||
|
|
||||||
assert!(mark_relay_idle_candidate(7001));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 7001));
|
||||||
assert_eq!(oldest_relay_idle_candidate(), Some(7001));
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7001));
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn middle_relay_baseline_public_api_desync_window_contract() {
|
fn middle_relay_baseline_public_api_desync_window_contract() {
|
||||||
let _guard = desync_dedup_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
|
||||||
clear_desync_dedup_for_testing();
|
|
||||||
|
|
||||||
let key = 0xDEAD_BEEF_0000_0001u64;
|
let key = 0xDEAD_BEEF_0000_0001u64;
|
||||||
let t0 = Instant::now();
|
let t0 = Instant::now();
|
||||||
|
|
||||||
assert!(should_emit_full_desync(key, false, t0));
|
assert!(should_emit_full_desync_for_testing(shared.as_ref(), key, false, t0));
|
||||||
assert!(!should_emit_full_desync(key, false, t0 + Duration::from_secs(1)));
|
assert!(!should_emit_full_desync_for_testing(
|
||||||
|
shared.as_ref(),
|
||||||
|
key,
|
||||||
|
false,
|
||||||
|
t0 + Duration::from_secs(1)
|
||||||
|
));
|
||||||
|
|
||||||
let t1 = t0 + DESYNC_DEDUP_WINDOW + Duration::from_millis(10);
|
let t1 = t0 + DESYNC_DEDUP_WINDOW + Duration::from_millis(10);
|
||||||
assert!(should_emit_full_desync(key, false, t1));
|
assert!(should_emit_full_desync_for_testing(shared.as_ref(), key, false, t1));
|
||||||
|
|
||||||
clear_desync_dedup_for_testing();
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -5,22 +5,20 @@ use std::thread;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn desync_all_full_bypass_does_not_initialize_or_grow_dedup_cache() {
|
fn desync_all_full_bypass_does_not_initialize_or_grow_dedup_cache() {
|
||||||
let _guard = desync_dedup_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
.expect("desync dedup test lock must be available");
|
|
||||||
clear_desync_dedup_for_testing();
|
|
||||||
|
|
||||||
let initial_len = DESYNC_DEDUP.get().map(|dedup| dedup.len()).unwrap_or(0);
|
let initial_len = desync_dedup_len_for_testing(shared.as_ref());
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
|
|
||||||
for i in 0..20_000u64 {
|
for i in 0..20_000u64 {
|
||||||
assert!(
|
assert!(
|
||||||
should_emit_full_desync(0xD35E_D000_0000_0000u64 ^ i, true, now),
|
should_emit_full_desync_for_testing(shared.as_ref(), 0xD35E_D000_0000_0000u64 ^ i, true, now),
|
||||||
"desync_all_full path must always emit"
|
"desync_all_full path must always emit"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
let after_len = DESYNC_DEDUP.get().map(|dedup| dedup.len()).unwrap_or(0);
|
let after_len = desync_dedup_len_for_testing(shared.as_ref());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
after_len, initial_len,
|
after_len, initial_len,
|
||||||
"desync_all_full bypass must not allocate or accumulate dedup entries"
|
"desync_all_full bypass must not allocate or accumulate dedup entries"
|
||||||
|
|
@ -29,39 +27,34 @@ fn desync_all_full_bypass_does_not_initialize_or_grow_dedup_cache() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn desync_all_full_bypass_keeps_existing_dedup_entries_unchanged() {
|
fn desync_all_full_bypass_keeps_existing_dedup_entries_unchanged() {
|
||||||
let _guard = desync_dedup_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
.expect("desync dedup test lock must be available");
|
|
||||||
clear_desync_dedup_for_testing();
|
|
||||||
|
|
||||||
let dedup = DESYNC_DEDUP.get_or_init(DashMap::new);
|
|
||||||
let seed_time = Instant::now() - Duration::from_secs(7);
|
let seed_time = Instant::now() - Duration::from_secs(7);
|
||||||
dedup.insert(0xAAAABBBBCCCCDDDD, seed_time);
|
desync_dedup_insert_for_testing(shared.as_ref(), 0xAAAABBBBCCCCDDDD, seed_time);
|
||||||
dedup.insert(0x1111222233334444, seed_time);
|
desync_dedup_insert_for_testing(shared.as_ref(), 0x1111222233334444, seed_time);
|
||||||
|
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
for i in 0..2048u64 {
|
for i in 0..2048u64 {
|
||||||
assert!(
|
assert!(
|
||||||
should_emit_full_desync(0xF011_F000_0000_0000u64 ^ i, true, now),
|
should_emit_full_desync_for_testing(shared.as_ref(), 0xF011_F000_0000_0000u64 ^ i, true, now),
|
||||||
"desync_all_full must bypass suppression and dedup refresh"
|
"desync_all_full must bypass suppression and dedup refresh"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
dedup.len(),
|
desync_dedup_len_for_testing(shared.as_ref()),
|
||||||
2,
|
2,
|
||||||
"bypass path must not mutate dedup cardinality"
|
"bypass path must not mutate dedup cardinality"
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
*dedup
|
desync_dedup_get_for_testing(shared.as_ref(), 0xAAAABBBBCCCCDDDD)
|
||||||
.get(&0xAAAABBBBCCCCDDDD)
|
|
||||||
.expect("seed key must remain"),
|
.expect("seed key must remain"),
|
||||||
seed_time,
|
seed_time,
|
||||||
"bypass path must not refresh existing dedup timestamps"
|
"bypass path must not refresh existing dedup timestamps"
|
||||||
);
|
);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
*dedup
|
desync_dedup_get_for_testing(shared.as_ref(), 0x1111222233334444)
|
||||||
.get(&0x1111222233334444)
|
|
||||||
.expect("seed key must remain"),
|
.expect("seed key must remain"),
|
||||||
seed_time,
|
seed_time,
|
||||||
"bypass path must not touch unrelated dedup entries"
|
"bypass path must not touch unrelated dedup entries"
|
||||||
|
|
@ -70,14 +63,12 @@ fn desync_all_full_bypass_keeps_existing_dedup_entries_unchanged() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn edge_all_full_burst_does_not_poison_later_false_path_tracking() {
|
fn edge_all_full_burst_does_not_poison_later_false_path_tracking() {
|
||||||
let _guard = desync_dedup_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
.expect("desync dedup test lock must be available");
|
|
||||||
clear_desync_dedup_for_testing();
|
|
||||||
|
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
for i in 0..8192u64 {
|
for i in 0..8192u64 {
|
||||||
assert!(should_emit_full_desync(
|
assert!(should_emit_full_desync_for_testing(shared.as_ref(),
|
||||||
0xABCD_0000_0000_0000 ^ i,
|
0xABCD_0000_0000_0000 ^ i,
|
||||||
true,
|
true,
|
||||||
now
|
now
|
||||||
|
|
@ -86,26 +77,20 @@ fn edge_all_full_burst_does_not_poison_later_false_path_tracking() {
|
||||||
|
|
||||||
let tracked_key = 0xDEAD_BEEF_0000_0001u64;
|
let tracked_key = 0xDEAD_BEEF_0000_0001u64;
|
||||||
assert!(
|
assert!(
|
||||||
should_emit_full_desync(tracked_key, false, now),
|
should_emit_full_desync_for_testing(shared.as_ref(), tracked_key, false, now),
|
||||||
"first false-path event after all_full burst must still be tracked and emitted"
|
"first false-path event after all_full burst must still be tracked and emitted"
|
||||||
);
|
);
|
||||||
|
|
||||||
let dedup = DESYNC_DEDUP
|
assert!(desync_dedup_get_for_testing(shared.as_ref(), tracked_key).is_some());
|
||||||
.get()
|
|
||||||
.expect("false path should initialize dedup");
|
|
||||||
assert!(dedup.get(&tracked_key).is_some());
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn adversarial_mixed_sequence_true_steps_never_change_cache_len() {
|
fn adversarial_mixed_sequence_true_steps_never_change_cache_len() {
|
||||||
let _guard = desync_dedup_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
.expect("desync dedup test lock must be available");
|
|
||||||
clear_desync_dedup_for_testing();
|
|
||||||
|
|
||||||
let dedup = DESYNC_DEDUP.get_or_init(DashMap::new);
|
|
||||||
for i in 0..256u64 {
|
for i in 0..256u64 {
|
||||||
dedup.insert(0x1000_0000_0000_0000 ^ i, Instant::now());
|
desync_dedup_insert_for_testing(shared.as_ref(), 0x1000_0000_0000_0000 ^ i, Instant::now());
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut seed = 0xC0DE_CAFE_BAAD_F00Du64;
|
let mut seed = 0xC0DE_CAFE_BAAD_F00Du64;
|
||||||
|
|
@ -116,9 +101,9 @@ fn adversarial_mixed_sequence_true_steps_never_change_cache_len() {
|
||||||
|
|
||||||
let flag_all_full = (seed & 0x1) == 1;
|
let flag_all_full = (seed & 0x1) == 1;
|
||||||
let key = 0x7000_0000_0000_0000u64 ^ i ^ seed;
|
let key = 0x7000_0000_0000_0000u64 ^ i ^ seed;
|
||||||
let before = dedup.len();
|
let before = desync_dedup_len_for_testing(shared.as_ref());
|
||||||
let _ = should_emit_full_desync(key, flag_all_full, Instant::now());
|
let _ = should_emit_full_desync_for_testing(shared.as_ref(), key, flag_all_full, Instant::now());
|
||||||
let after = dedup.len();
|
let after = desync_dedup_len_for_testing(shared.as_ref());
|
||||||
|
|
||||||
if flag_all_full {
|
if flag_all_full {
|
||||||
assert_eq!(after, before, "all_full step must not mutate dedup length");
|
assert_eq!(after, before, "all_full step must not mutate dedup length");
|
||||||
|
|
@ -128,50 +113,46 @@ fn adversarial_mixed_sequence_true_steps_never_change_cache_len() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn light_fuzz_all_full_mode_always_emits_and_stays_bounded() {
|
fn light_fuzz_all_full_mode_always_emits_and_stays_bounded() {
|
||||||
let _guard = desync_dedup_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
.expect("desync dedup test lock must be available");
|
|
||||||
clear_desync_dedup_for_testing();
|
|
||||||
|
|
||||||
let mut seed = 0x1234_5678_9ABC_DEF0u64;
|
let mut seed = 0x1234_5678_9ABC_DEF0u64;
|
||||||
let before = DESYNC_DEDUP.get().map(|d| d.len()).unwrap_or(0);
|
let before = desync_dedup_len_for_testing(shared.as_ref());
|
||||||
|
|
||||||
for _ in 0..20_000 {
|
for _ in 0..20_000 {
|
||||||
seed ^= seed << 7;
|
seed ^= seed << 7;
|
||||||
seed ^= seed >> 9;
|
seed ^= seed >> 9;
|
||||||
seed ^= seed << 8;
|
seed ^= seed << 8;
|
||||||
let key = seed ^ 0x55AA_55AA_55AA_55AAu64;
|
let key = seed ^ 0x55AA_55AA_55AA_55AAu64;
|
||||||
assert!(should_emit_full_desync(key, true, Instant::now()));
|
assert!(should_emit_full_desync_for_testing(shared.as_ref(), key, true, Instant::now()));
|
||||||
}
|
}
|
||||||
|
|
||||||
let after = DESYNC_DEDUP.get().map(|d| d.len()).unwrap_or(0);
|
let after = desync_dedup_len_for_testing(shared.as_ref());
|
||||||
assert_eq!(after, before);
|
assert_eq!(after, before);
|
||||||
assert!(after <= DESYNC_DEDUP_MAX_ENTRIES);
|
assert!(after <= DESYNC_DEDUP_MAX_ENTRIES);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn stress_parallel_all_full_storm_does_not_grow_or_mutate_cache() {
|
fn stress_parallel_all_full_storm_does_not_grow_or_mutate_cache() {
|
||||||
let _guard = desync_dedup_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
.expect("desync dedup test lock must be available");
|
|
||||||
clear_desync_dedup_for_testing();
|
|
||||||
|
|
||||||
let dedup = DESYNC_DEDUP.get_or_init(DashMap::new);
|
|
||||||
let seed_time = Instant::now() - Duration::from_secs(2);
|
let seed_time = Instant::now() - Duration::from_secs(2);
|
||||||
for i in 0..1024u64 {
|
for i in 0..1024u64 {
|
||||||
dedup.insert(0x8888_0000_0000_0000 ^ i, seed_time);
|
desync_dedup_insert_for_testing(shared.as_ref(), 0x8888_0000_0000_0000 ^ i, seed_time);
|
||||||
}
|
}
|
||||||
let before_len = dedup.len();
|
let before_len = desync_dedup_len_for_testing(shared.as_ref());
|
||||||
|
|
||||||
let emits = Arc::new(AtomicUsize::new(0));
|
let emits = Arc::new(AtomicUsize::new(0));
|
||||||
let mut workers = Vec::new();
|
let mut workers = Vec::new();
|
||||||
for worker in 0..16u64 {
|
for worker in 0..16u64 {
|
||||||
let emits = Arc::clone(&emits);
|
let emits = Arc::clone(&emits);
|
||||||
|
let shared = shared.clone();
|
||||||
workers.push(thread::spawn(move || {
|
workers.push(thread::spawn(move || {
|
||||||
let now = Instant::now();
|
let now = Instant::now();
|
||||||
for i in 0..4096u64 {
|
for i in 0..4096u64 {
|
||||||
let key = 0xFACE_0000_0000_0000u64 ^ (worker << 20) ^ i;
|
let key = 0xFACE_0000_0000_0000u64 ^ (worker << 20) ^ i;
|
||||||
if should_emit_full_desync(key, true, now) {
|
if should_emit_full_desync_for_testing(shared.as_ref(), key, true, now) {
|
||||||
emits.fetch_add(1, Ordering::Relaxed);
|
emits.fetch_add(1, Ordering::Relaxed);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -184,7 +165,7 @@ fn stress_parallel_all_full_storm_does_not_grow_or_mutate_cache() {
|
||||||
|
|
||||||
assert_eq!(emits.load(Ordering::Relaxed), 16 * 4096);
|
assert_eq!(emits.load(Ordering::Relaxed), 16 * 4096);
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
dedup.len(),
|
desync_dedup_len_for_testing(shared.as_ref()),
|
||||||
before_len,
|
before_len,
|
||||||
"parallel all_full storm must not mutate cache len"
|
"parallel all_full storm must not mutate cache len"
|
||||||
);
|
);
|
||||||
|
|
|
||||||
|
|
@ -360,73 +360,73 @@ async fn stress_many_idle_sessions_fail_closed_without_hang() {
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn pressure_evicts_oldest_idle_candidate_with_deterministic_ordering() {
|
fn pressure_evicts_oldest_idle_candidate_with_deterministic_ordering() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
let stats = Stats::new();
|
let stats = Stats::new();
|
||||||
|
|
||||||
assert!(mark_relay_idle_candidate(10));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 10));
|
||||||
assert!(mark_relay_idle_candidate(11));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 11));
|
||||||
assert_eq!(oldest_relay_idle_candidate(), Some(10));
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(10));
|
||||||
|
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
|
|
||||||
let mut seen_for_newer = 0u64;
|
let mut seen_for_newer = 0u64;
|
||||||
assert!(
|
assert!(
|
||||||
!maybe_evict_idle_candidate_on_pressure(11, &mut seen_for_newer, &stats),
|
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 11, &mut seen_for_newer, &stats),
|
||||||
"newer idle candidate must not be evicted while older candidate exists"
|
"newer idle candidate must not be evicted while older candidate exists"
|
||||||
);
|
);
|
||||||
assert_eq!(oldest_relay_idle_candidate(), Some(10));
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(10));
|
||||||
|
|
||||||
let mut seen_for_oldest = 0u64;
|
let mut seen_for_oldest = 0u64;
|
||||||
assert!(
|
assert!(
|
||||||
maybe_evict_idle_candidate_on_pressure(10, &mut seen_for_oldest, &stats),
|
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 10, &mut seen_for_oldest, &stats),
|
||||||
"oldest idle candidate must be evicted first under pressure"
|
"oldest idle candidate must be evicted first under pressure"
|
||||||
);
|
);
|
||||||
assert_eq!(oldest_relay_idle_candidate(), Some(11));
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(11));
|
||||||
assert_eq!(stats.get_relay_pressure_evict_total(), 1);
|
assert_eq!(stats.get_relay_pressure_evict_total(), 1);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn pressure_does_not_evict_without_new_pressure_signal() {
|
fn pressure_does_not_evict_without_new_pressure_signal() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
let stats = Stats::new();
|
let stats = Stats::new();
|
||||||
|
|
||||||
assert!(mark_relay_idle_candidate(21));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 21));
|
||||||
let mut seen = relay_pressure_event_seq();
|
let mut seen = relay_pressure_event_seq_for_testing(shared.as_ref());
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
!maybe_evict_idle_candidate_on_pressure(21, &mut seen, &stats),
|
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 21, &mut seen, &stats),
|
||||||
"without new pressure signal, candidate must stay"
|
"without new pressure signal, candidate must stay"
|
||||||
);
|
);
|
||||||
assert_eq!(stats.get_relay_pressure_evict_total(), 0);
|
assert_eq!(stats.get_relay_pressure_evict_total(), 0);
|
||||||
assert_eq!(oldest_relay_idle_candidate(), Some(21));
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(21));
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn stress_pressure_eviction_preserves_fifo_across_many_candidates() {
|
fn stress_pressure_eviction_preserves_fifo_across_many_candidates() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
let stats = Stats::new();
|
let stats = Stats::new();
|
||||||
|
|
||||||
let mut seen_per_conn = std::collections::HashMap::new();
|
let mut seen_per_conn = std::collections::HashMap::new();
|
||||||
for conn_id in 1000u64..1064u64 {
|
for conn_id in 1000u64..1064u64 {
|
||||||
assert!(mark_relay_idle_candidate(conn_id));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), conn_id));
|
||||||
seen_per_conn.insert(conn_id, 0u64);
|
seen_per_conn.insert(conn_id, 0u64);
|
||||||
}
|
}
|
||||||
|
|
||||||
for expected in 1000u64..1064u64 {
|
for expected in 1000u64..1064u64 {
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
|
|
||||||
let mut seen = *seen_per_conn
|
let mut seen = *seen_per_conn
|
||||||
.get(&expected)
|
.get(&expected)
|
||||||
.expect("per-conn pressure cursor must exist");
|
.expect("per-conn pressure cursor must exist");
|
||||||
assert!(
|
assert!(
|
||||||
maybe_evict_idle_candidate_on_pressure(expected, &mut seen, &stats),
|
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), expected, &mut seen, &stats),
|
||||||
"expected conn_id {expected} must be evicted next by deterministic FIFO ordering"
|
"expected conn_id {expected} must be evicted next by deterministic FIFO ordering"
|
||||||
);
|
);
|
||||||
seen_per_conn.insert(expected, seen);
|
seen_per_conn.insert(expected, seen);
|
||||||
|
|
@ -436,33 +436,33 @@ fn stress_pressure_eviction_preserves_fifo_across_many_candidates() {
|
||||||
} else {
|
} else {
|
||||||
Some(expected + 1)
|
Some(expected + 1)
|
||||||
};
|
};
|
||||||
assert_eq!(oldest_relay_idle_candidate(), next);
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), next);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert_eq!(stats.get_relay_pressure_evict_total(), 64);
|
assert_eq!(stats.get_relay_pressure_evict_total(), 64);
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn blackhat_single_pressure_event_must_not_evict_more_than_one_candidate() {
|
fn blackhat_single_pressure_event_must_not_evict_more_than_one_candidate() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
let stats = Stats::new();
|
let stats = Stats::new();
|
||||||
|
|
||||||
assert!(mark_relay_idle_candidate(301));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 301));
|
||||||
assert!(mark_relay_idle_candidate(302));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 302));
|
||||||
assert!(mark_relay_idle_candidate(303));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 303));
|
||||||
|
|
||||||
let mut seen_301 = 0u64;
|
let mut seen_301 = 0u64;
|
||||||
let mut seen_302 = 0u64;
|
let mut seen_302 = 0u64;
|
||||||
let mut seen_303 = 0u64;
|
let mut seen_303 = 0u64;
|
||||||
|
|
||||||
// Single pressure event should authorize at most one eviction globally.
|
// Single pressure event should authorize at most one eviction globally.
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
|
|
||||||
let evicted_301 = maybe_evict_idle_candidate_on_pressure(301, &mut seen_301, &stats);
|
let evicted_301 = maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 301, &mut seen_301, &stats);
|
||||||
let evicted_302 = maybe_evict_idle_candidate_on_pressure(302, &mut seen_302, &stats);
|
let evicted_302 = maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 302, &mut seen_302, &stats);
|
||||||
let evicted_303 = maybe_evict_idle_candidate_on_pressure(303, &mut seen_303, &stats);
|
let evicted_303 = maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 303, &mut seen_303, &stats);
|
||||||
|
|
||||||
let evicted_total = [evicted_301, evicted_302, evicted_303]
|
let evicted_total = [evicted_301, evicted_302, evicted_303]
|
||||||
.iter()
|
.iter()
|
||||||
|
|
@ -474,30 +474,30 @@ fn blackhat_single_pressure_event_must_not_evict_more_than_one_candidate() {
|
||||||
"single pressure event must not cascade-evict multiple idle candidates"
|
"single pressure event must not cascade-evict multiple idle candidates"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn blackhat_pressure_counter_must_track_global_budget_not_per_session_cursor() {
|
fn blackhat_pressure_counter_must_track_global_budget_not_per_session_cursor() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
let stats = Stats::new();
|
let stats = Stats::new();
|
||||||
|
|
||||||
assert!(mark_relay_idle_candidate(401));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 401));
|
||||||
assert!(mark_relay_idle_candidate(402));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 402));
|
||||||
|
|
||||||
let mut seen_oldest = 0u64;
|
let mut seen_oldest = 0u64;
|
||||||
let mut seen_next = 0u64;
|
let mut seen_next = 0u64;
|
||||||
|
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
maybe_evict_idle_candidate_on_pressure(401, &mut seen_oldest, &stats),
|
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 401, &mut seen_oldest, &stats),
|
||||||
"oldest candidate must consume pressure budget first"
|
"oldest candidate must consume pressure budget first"
|
||||||
);
|
);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
!maybe_evict_idle_candidate_on_pressure(402, &mut seen_next, &stats),
|
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 402, &mut seen_next, &stats),
|
||||||
"next candidate must not consume the same pressure budget"
|
"next candidate must not consume the same pressure budget"
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|
@ -507,47 +507,47 @@ fn blackhat_pressure_counter_must_track_global_budget_not_per_session_cursor() {
|
||||||
"single pressure budget must produce exactly one eviction"
|
"single pressure budget must produce exactly one eviction"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn blackhat_stale_pressure_before_idle_mark_must_not_trigger_eviction() {
|
fn blackhat_stale_pressure_before_idle_mark_must_not_trigger_eviction() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
let stats = Stats::new();
|
let stats = Stats::new();
|
||||||
|
|
||||||
// Pressure happened before any idle candidate existed.
|
// Pressure happened before any idle candidate existed.
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
assert!(mark_relay_idle_candidate(501));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 501));
|
||||||
|
|
||||||
let mut seen = 0u64;
|
let mut seen = 0u64;
|
||||||
assert!(
|
assert!(
|
||||||
!maybe_evict_idle_candidate_on_pressure(501, &mut seen, &stats),
|
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 501, &mut seen, &stats),
|
||||||
"stale pressure (before soft-idle mark) must not evict newly marked candidate"
|
"stale pressure (before soft-idle mark) must not evict newly marked candidate"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn blackhat_stale_pressure_must_not_evict_any_of_newly_marked_batch() {
|
fn blackhat_stale_pressure_must_not_evict_any_of_newly_marked_batch() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
let stats = Stats::new();
|
let stats = Stats::new();
|
||||||
|
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
assert!(mark_relay_idle_candidate(511));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 511));
|
||||||
assert!(mark_relay_idle_candidate(512));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 512));
|
||||||
assert!(mark_relay_idle_candidate(513));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 513));
|
||||||
|
|
||||||
let mut seen_511 = 0u64;
|
let mut seen_511 = 0u64;
|
||||||
let mut seen_512 = 0u64;
|
let mut seen_512 = 0u64;
|
||||||
let mut seen_513 = 0u64;
|
let mut seen_513 = 0u64;
|
||||||
|
|
||||||
let evicted = [
|
let evicted = [
|
||||||
maybe_evict_idle_candidate_on_pressure(511, &mut seen_511, &stats),
|
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 511, &mut seen_511, &stats),
|
||||||
maybe_evict_idle_candidate_on_pressure(512, &mut seen_512, &stats),
|
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 512, &mut seen_512, &stats),
|
||||||
maybe_evict_idle_candidate_on_pressure(513, &mut seen_513, &stats),
|
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 513, &mut seen_513, &stats),
|
||||||
]
|
]
|
||||||
.iter()
|
.iter()
|
||||||
.filter(|value| **value)
|
.filter(|value| **value)
|
||||||
|
|
@ -558,111 +558,103 @@ fn blackhat_stale_pressure_must_not_evict_any_of_newly_marked_batch() {
|
||||||
"stale pressure event must not evict any candidate from a newly marked batch"
|
"stale pressure event must not evict any candidate from a newly marked batch"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn blackhat_stale_pressure_seen_without_candidates_must_be_globally_invalidated() {
|
fn blackhat_stale_pressure_seen_without_candidates_must_be_globally_invalidated() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
let stats = Stats::new();
|
let stats = Stats::new();
|
||||||
|
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
|
|
||||||
// Session A observed pressure while there were no candidates.
|
// Session A observed pressure while there were no candidates.
|
||||||
let mut seen_a = 0u64;
|
let mut seen_a = 0u64;
|
||||||
assert!(
|
assert!(
|
||||||
!maybe_evict_idle_candidate_on_pressure(999_001, &mut seen_a, &stats),
|
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 999_001, &mut seen_a, &stats),
|
||||||
"no candidate existed, so no eviction is possible"
|
"no candidate existed, so no eviction is possible"
|
||||||
);
|
);
|
||||||
|
|
||||||
// Candidate appears later; Session B must not be able to consume stale pressure.
|
// Candidate appears later; Session B must not be able to consume stale pressure.
|
||||||
assert!(mark_relay_idle_candidate(521));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 521));
|
||||||
let mut seen_b = 0u64;
|
let mut seen_b = 0u64;
|
||||||
assert!(
|
assert!(
|
||||||
!maybe_evict_idle_candidate_on_pressure(521, &mut seen_b, &stats),
|
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 521, &mut seen_b, &stats),
|
||||||
"once pressure is observed with empty candidate set, it must not be replayed later"
|
"once pressure is observed with empty candidate set, it must not be replayed later"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn blackhat_stale_pressure_must_not_survive_candidate_churn() {
|
fn blackhat_stale_pressure_must_not_survive_candidate_churn() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
let stats = Stats::new();
|
let stats = Stats::new();
|
||||||
|
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
assert!(mark_relay_idle_candidate(531));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 531));
|
||||||
clear_relay_idle_candidate(531);
|
clear_relay_idle_candidate_for_testing(shared.as_ref(), 531);
|
||||||
assert!(mark_relay_idle_candidate(532));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 532));
|
||||||
|
|
||||||
let mut seen = 0u64;
|
let mut seen = 0u64;
|
||||||
assert!(
|
assert!(
|
||||||
!maybe_evict_idle_candidate_on_pressure(532, &mut seen, &stats),
|
!maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), 532, &mut seen, &stats),
|
||||||
"stale pressure must not survive clear+remark churn cycles"
|
"stale pressure must not survive clear+remark churn cycles"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn blackhat_pressure_seq_saturation_must_not_disable_future_pressure_accounting() {
|
fn blackhat_pressure_seq_saturation_must_not_disable_future_pressure_accounting() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut guard = relay_idle_candidate_registry()
|
set_relay_pressure_state_for_testing(shared.as_ref(), u64::MAX, u64::MAX - 1);
|
||||||
.lock()
|
|
||||||
.expect("registry lock must be available");
|
|
||||||
guard.pressure_event_seq = u64::MAX;
|
|
||||||
guard.pressure_consumed_seq = u64::MAX - 1;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// A new pressure event should still be representable; saturating at MAX creates a permanent lockout.
|
// A new pressure event should still be representable; saturating at MAX creates a permanent lockout.
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
let after = relay_pressure_event_seq();
|
let after = relay_pressure_event_seq_for_testing(shared.as_ref());
|
||||||
assert_ne!(
|
assert_ne!(
|
||||||
after,
|
after,
|
||||||
u64::MAX,
|
u64::MAX,
|
||||||
"pressure sequence saturation must not permanently freeze event progression"
|
"pressure sequence saturation must not permanently freeze event progression"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn blackhat_pressure_seq_saturation_must_not_break_multiple_distinct_events() {
|
fn blackhat_pressure_seq_saturation_must_not_break_multiple_distinct_events() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
{
|
{
|
||||||
let mut guard = relay_idle_candidate_registry()
|
set_relay_pressure_state_for_testing(shared.as_ref(), u64::MAX, u64::MAX);
|
||||||
.lock()
|
|
||||||
.expect("registry lock must be available");
|
|
||||||
guard.pressure_event_seq = u64::MAX;
|
|
||||||
guard.pressure_consumed_seq = u64::MAX;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
let first = relay_pressure_event_seq();
|
let first = relay_pressure_event_seq_for_testing(shared.as_ref());
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
let second = relay_pressure_event_seq();
|
let second = relay_pressure_event_seq_for_testing(shared.as_ref());
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
second > first,
|
second > first,
|
||||||
"distinct pressure events must remain distinguishable even at sequence boundary"
|
"distinct pressure events must remain distinguishable even at sequence boundary"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn integration_race_single_pressure_event_allows_at_most_one_eviction_under_parallel_claims()
|
async fn integration_race_single_pressure_event_allows_at_most_one_eviction_under_parallel_claims()
|
||||||
{
|
{
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let stats = Arc::new(Stats::new());
|
let stats = Arc::new(Stats::new());
|
||||||
let sessions = 16usize;
|
let sessions = 16usize;
|
||||||
|
|
@ -671,20 +663,21 @@ async fn integration_race_single_pressure_event_allows_at_most_one_eviction_unde
|
||||||
let mut seen_per_session = vec![0u64; sessions];
|
let mut seen_per_session = vec![0u64; sessions];
|
||||||
|
|
||||||
for conn_id in &conn_ids {
|
for conn_id in &conn_ids {
|
||||||
assert!(mark_relay_idle_candidate(*conn_id));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id));
|
||||||
}
|
}
|
||||||
|
|
||||||
for round in 0..rounds {
|
for round in 0..rounds {
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
|
|
||||||
let mut joins = Vec::with_capacity(sessions);
|
let mut joins = Vec::with_capacity(sessions);
|
||||||
for (idx, conn_id) in conn_ids.iter().enumerate() {
|
for (idx, conn_id) in conn_ids.iter().enumerate() {
|
||||||
let mut seen = seen_per_session[idx];
|
let mut seen = seen_per_session[idx];
|
||||||
let conn_id = *conn_id;
|
let conn_id = *conn_id;
|
||||||
let stats = stats.clone();
|
let stats = stats.clone();
|
||||||
|
let shared = shared.clone();
|
||||||
joins.push(tokio::spawn(async move {
|
joins.push(tokio::spawn(async move {
|
||||||
let evicted =
|
let evicted =
|
||||||
maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen, stats.as_ref());
|
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), conn_id, &mut seen, stats.as_ref());
|
||||||
(idx, conn_id, seen, evicted)
|
(idx, conn_id, seen, evicted)
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
@ -706,7 +699,7 @@ async fn integration_race_single_pressure_event_allows_at_most_one_eviction_unde
|
||||||
);
|
);
|
||||||
if let Some(conn) = evicted_conn {
|
if let Some(conn) = evicted_conn {
|
||||||
assert!(
|
assert!(
|
||||||
mark_relay_idle_candidate(conn),
|
mark_relay_idle_candidate_for_testing(shared.as_ref(), conn),
|
||||||
"round {round}: evicted conn must be re-markable as idle candidate"
|
"round {round}: evicted conn must be re-markable as idle candidate"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
@ -721,13 +714,13 @@ async fn integration_race_single_pressure_event_allows_at_most_one_eviction_unde
|
||||||
"parallel race must still observe at least one successful eviction"
|
"parallel race must still observe at least one successful eviction"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalidation_and_budget() {
|
async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalidation_and_budget() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let stats = Arc::new(Stats::new());
|
let stats = Arc::new(Stats::new());
|
||||||
let sessions = 12usize;
|
let sessions = 12usize;
|
||||||
|
|
@ -736,7 +729,7 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
|
||||||
let mut seen_per_session = vec![0u64; sessions];
|
let mut seen_per_session = vec![0u64; sessions];
|
||||||
|
|
||||||
for conn_id in &conn_ids {
|
for conn_id in &conn_ids {
|
||||||
assert!(mark_relay_idle_candidate(*conn_id));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id));
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut expected_total_evictions = 0u64;
|
let mut expected_total_evictions = 0u64;
|
||||||
|
|
@ -745,20 +738,21 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
|
||||||
let empty_phase = round % 5 == 0;
|
let empty_phase = round % 5 == 0;
|
||||||
if empty_phase {
|
if empty_phase {
|
||||||
for conn_id in &conn_ids {
|
for conn_id in &conn_ids {
|
||||||
clear_relay_idle_candidate(*conn_id);
|
clear_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
|
|
||||||
let mut joins = Vec::with_capacity(sessions);
|
let mut joins = Vec::with_capacity(sessions);
|
||||||
for (idx, conn_id) in conn_ids.iter().enumerate() {
|
for (idx, conn_id) in conn_ids.iter().enumerate() {
|
||||||
let mut seen = seen_per_session[idx];
|
let mut seen = seen_per_session[idx];
|
||||||
let conn_id = *conn_id;
|
let conn_id = *conn_id;
|
||||||
let stats = stats.clone();
|
let stats = stats.clone();
|
||||||
|
let shared = shared.clone();
|
||||||
joins.push(tokio::spawn(async move {
|
joins.push(tokio::spawn(async move {
|
||||||
let evicted =
|
let evicted =
|
||||||
maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen, stats.as_ref());
|
maybe_evict_idle_candidate_on_pressure_for_testing(shared.as_ref(), conn_id, &mut seen, stats.as_ref());
|
||||||
(idx, conn_id, seen, evicted)
|
(idx, conn_id, seen, evicted)
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
@ -780,7 +774,7 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
|
||||||
"round {round}: empty candidate phase must not allow stale-pressure eviction"
|
"round {round}: empty candidate phase must not allow stale-pressure eviction"
|
||||||
);
|
);
|
||||||
for conn_id in &conn_ids {
|
for conn_id in &conn_ids {
|
||||||
assert!(mark_relay_idle_candidate(*conn_id));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), *conn_id));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
assert!(
|
assert!(
|
||||||
|
|
@ -789,7 +783,7 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
|
||||||
);
|
);
|
||||||
if let Some(conn_id) = evicted_conn {
|
if let Some(conn_id) = evicted_conn {
|
||||||
expected_total_evictions = expected_total_evictions.saturating_add(1);
|
expected_total_evictions = expected_total_evictions.saturating_add(1);
|
||||||
assert!(mark_relay_idle_candidate(conn_id));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), conn_id));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
@ -800,5 +794,5 @@ async fn integration_race_burst_pressure_with_churn_preserves_empty_set_invalida
|
||||||
"global pressure eviction counter must match observed per-round successful consumes"
|
"global pressure eviction counter must match observed per-round successful consumes"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -3,12 +3,13 @@ use std::panic::{AssertUnwindSafe, catch_unwind};
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn blackhat_registry_poison_recovers_with_fail_closed_reset_and_pressure_accounting() {
|
fn blackhat_registry_poison_recovers_with_fail_closed_reset_and_pressure_accounting() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let _ = catch_unwind(AssertUnwindSafe(|| {
|
let _ = catch_unwind(AssertUnwindSafe(|| {
|
||||||
let registry = relay_idle_candidate_registry();
|
let mut guard = shared
|
||||||
let mut guard = registry
|
.middle_relay
|
||||||
|
.relay_idle_registry
|
||||||
.lock()
|
.lock()
|
||||||
.expect("registry lock must be acquired before poison");
|
.expect("registry lock must be acquired before poison");
|
||||||
guard.by_conn_id.insert(
|
guard.by_conn_id.insert(
|
||||||
|
|
@ -23,40 +24,41 @@ fn blackhat_registry_poison_recovers_with_fail_closed_reset_and_pressure_account
|
||||||
}));
|
}));
|
||||||
|
|
||||||
// Helper lock must recover from poison, reset stale state, and continue.
|
// Helper lock must recover from poison, reset stale state, and continue.
|
||||||
assert!(mark_relay_idle_candidate(42));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 42));
|
||||||
assert_eq!(oldest_relay_idle_candidate(), Some(42));
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(42));
|
||||||
|
|
||||||
let before = relay_pressure_event_seq();
|
let before = relay_pressure_event_seq_for_testing(shared.as_ref());
|
||||||
note_relay_pressure_event();
|
note_relay_pressure_event_for_testing(shared.as_ref());
|
||||||
let after = relay_pressure_event_seq();
|
let after = relay_pressure_event_seq_for_testing(shared.as_ref());
|
||||||
assert!(
|
assert!(
|
||||||
after > before,
|
after > before,
|
||||||
"pressure accounting must still advance after poison"
|
"pressure accounting must still advance after poison"
|
||||||
);
|
);
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn clear_state_helper_must_reset_poisoned_registry_for_deterministic_fifo_tests() {
|
fn clear_state_helper_must_reset_poisoned_registry_for_deterministic_fifo_tests() {
|
||||||
let _guard = relay_idle_pressure_test_scope();
|
let shared = ProxySharedState::new();
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
let _ = catch_unwind(AssertUnwindSafe(|| {
|
let _ = catch_unwind(AssertUnwindSafe(|| {
|
||||||
let registry = relay_idle_candidate_registry();
|
let _guard = shared
|
||||||
let _guard = registry
|
.middle_relay
|
||||||
|
.relay_idle_registry
|
||||||
.lock()
|
.lock()
|
||||||
.expect("registry lock must be acquired before poison");
|
.expect("registry lock must be acquired before poison");
|
||||||
panic!("intentional poison while lock held");
|
panic!("intentional poison while lock held");
|
||||||
}));
|
}));
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
assert_eq!(oldest_relay_idle_candidate(), None);
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), None);
|
||||||
assert_eq!(relay_pressure_event_seq(), 0);
|
assert_eq!(relay_pressure_event_seq_for_testing(shared.as_ref()), 0);
|
||||||
|
|
||||||
assert!(mark_relay_idle_candidate(7));
|
assert!(mark_relay_idle_candidate_for_testing(shared.as_ref(), 7));
|
||||||
assert_eq!(oldest_relay_idle_candidate(), Some(7));
|
assert_eq!(oldest_relay_idle_candidate_for_testing(shared.as_ref()), Some(7));
|
||||||
|
|
||||||
clear_relay_idle_pressure_state_for_testing();
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,6 @@
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::stats::Stats;
|
use crate::stats::Stats;
|
||||||
use crate::stream::BufferPool;
|
use crate::stream::BufferPool;
|
||||||
use std::collections::HashSet;
|
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use tokio::time::{Duration as TokioDuration, timeout};
|
use tokio::time::{Duration as TokioDuration, timeout};
|
||||||
|
|
||||||
|
|
@ -16,32 +15,30 @@ fn make_pooled_payload(data: &[u8]) -> PooledBuffer {
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore = "Tracking for M-04: Verify should_emit_full_desync returns true on first occurrence and false on duplicate within window"]
|
#[ignore = "Tracking for M-04: Verify should_emit_full_desync returns true on first occurrence and false on duplicate within window"]
|
||||||
fn should_emit_full_desync_filters_duplicates() {
|
fn should_emit_full_desync_filters_duplicates() {
|
||||||
let _guard = desync_dedup_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
.expect("desync dedup test lock must be available");
|
|
||||||
clear_desync_dedup_for_testing();
|
|
||||||
|
|
||||||
let key = 0x4D04_0000_0000_0001_u64;
|
let key = 0x4D04_0000_0000_0001_u64;
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
should_emit_full_desync(key, false, base),
|
should_emit_full_desync_for_testing(shared.as_ref(), key, false, base),
|
||||||
"first occurrence must emit full forensic record"
|
"first occurrence must emit full forensic record"
|
||||||
);
|
);
|
||||||
assert!(
|
assert!(
|
||||||
!should_emit_full_desync(key, false, base),
|
!should_emit_full_desync_for_testing(shared.as_ref(), key, false, base),
|
||||||
"duplicate at same timestamp must be suppressed"
|
"duplicate at same timestamp must be suppressed"
|
||||||
);
|
);
|
||||||
|
|
||||||
let within_window = base + DESYNC_DEDUP_WINDOW - TokioDuration::from_millis(1);
|
let within_window = base + DESYNC_DEDUP_WINDOW - TokioDuration::from_millis(1);
|
||||||
assert!(
|
assert!(
|
||||||
!should_emit_full_desync(key, false, within_window),
|
!should_emit_full_desync_for_testing(shared.as_ref(), key, false, within_window),
|
||||||
"duplicate strictly inside dedup window must stay suppressed"
|
"duplicate strictly inside dedup window must stay suppressed"
|
||||||
);
|
);
|
||||||
|
|
||||||
let on_window_edge = base + DESYNC_DEDUP_WINDOW;
|
let on_window_edge = base + DESYNC_DEDUP_WINDOW;
|
||||||
assert!(
|
assert!(
|
||||||
should_emit_full_desync(key, false, on_window_edge),
|
should_emit_full_desync_for_testing(shared.as_ref(), key, false, on_window_edge),
|
||||||
"duplicate at window boundary must re-emit and refresh"
|
"duplicate at window boundary must re-emit and refresh"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
@ -49,39 +46,34 @@ fn should_emit_full_desync_filters_duplicates() {
|
||||||
#[test]
|
#[test]
|
||||||
#[ignore = "Tracking for M-04: Verify desync dedup eviction behaves correctly under map-full condition"]
|
#[ignore = "Tracking for M-04: Verify desync dedup eviction behaves correctly under map-full condition"]
|
||||||
fn desync_dedup_eviction_under_map_full_condition() {
|
fn desync_dedup_eviction_under_map_full_condition() {
|
||||||
let _guard = desync_dedup_test_lock()
|
let shared = ProxySharedState::new();
|
||||||
.lock()
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
.expect("desync dedup test lock must be available");
|
|
||||||
clear_desync_dedup_for_testing();
|
|
||||||
|
|
||||||
let base = Instant::now();
|
let base = Instant::now();
|
||||||
for key in 0..DESYNC_DEDUP_MAX_ENTRIES as u64 {
|
for key in 0..DESYNC_DEDUP_MAX_ENTRIES as u64 {
|
||||||
assert!(
|
assert!(
|
||||||
should_emit_full_desync(key, false, base),
|
should_emit_full_desync_for_testing(shared.as_ref(), key, false, base),
|
||||||
"unique key should be inserted while warming dedup cache"
|
"unique key should be inserted while warming dedup cache"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
let dedup = DESYNC_DEDUP
|
|
||||||
.get()
|
|
||||||
.expect("dedup map must exist after warm-up insertions");
|
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
dedup.len(),
|
desync_dedup_len_for_testing(shared.as_ref()),
|
||||||
DESYNC_DEDUP_MAX_ENTRIES,
|
DESYNC_DEDUP_MAX_ENTRIES,
|
||||||
"cache warm-up must reach exact hard cap"
|
"cache warm-up must reach exact hard cap"
|
||||||
);
|
);
|
||||||
|
|
||||||
let before_keys: HashSet<u64> = dedup.iter().map(|entry| *entry.key()).collect();
|
let before_keys = desync_dedup_keys_for_testing(shared.as_ref());
|
||||||
let newcomer_key = 0x4D04_FFFF_FFFF_0001_u64;
|
let newcomer_key = 0x4D04_FFFF_FFFF_0001_u64;
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
should_emit_full_desync(newcomer_key, false, base),
|
should_emit_full_desync_for_testing(shared.as_ref(), newcomer_key, false, base),
|
||||||
"first newcomer at map-full must emit under bounded full-cache gate"
|
"first newcomer at map-full must emit under bounded full-cache gate"
|
||||||
);
|
);
|
||||||
|
|
||||||
let after_keys: HashSet<u64> = dedup.iter().map(|entry| *entry.key()).collect();
|
let after_keys = desync_dedup_keys_for_testing(shared.as_ref());
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
dedup.len(),
|
desync_dedup_len_for_testing(shared.as_ref()),
|
||||||
DESYNC_DEDUP_MAX_ENTRIES,
|
DESYNC_DEDUP_MAX_ENTRIES,
|
||||||
"map-full insertion must preserve hard capacity bound"
|
"map-full insertion must preserve hard capacity bound"
|
||||||
);
|
);
|
||||||
|
|
@ -102,7 +94,7 @@ fn desync_dedup_eviction_under_map_full_condition() {
|
||||||
);
|
);
|
||||||
|
|
||||||
assert!(
|
assert!(
|
||||||
!should_emit_full_desync(newcomer_key, false, base),
|
!should_emit_full_desync_for_testing(shared.as_ref(), newcomer_key, false, base),
|
||||||
"immediate duplicate newcomer must remain suppressed"
|
"immediate duplicate newcomer must remain suppressed"
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,608 @@
|
||||||
|
use crate::proxy::handshake::{
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared, auth_probe_is_throttled_for_testing_in_shared,
|
||||||
|
auth_probe_record_failure_for_testing, clear_auth_probe_state_for_testing_in_shared,
|
||||||
|
clear_unknown_sni_warn_state_for_testing_in_shared, clear_warned_secrets_for_testing_in_shared,
|
||||||
|
should_emit_unknown_sni_warn_for_testing_in_shared, warned_secrets_for_testing_in_shared,
|
||||||
|
};
|
||||||
|
use crate::proxy::client::handle_client_stream_with_shared;
|
||||||
|
use crate::proxy::middle_relay::{
|
||||||
|
clear_desync_dedup_for_testing_in_shared, clear_relay_idle_candidate_for_testing,
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared, mark_relay_idle_candidate_for_testing,
|
||||||
|
maybe_evict_idle_candidate_on_pressure_for_testing, note_relay_pressure_event_for_testing,
|
||||||
|
oldest_relay_idle_candidate_for_testing, relay_idle_mark_seq_for_testing,
|
||||||
|
relay_pressure_event_seq_for_testing, should_emit_full_desync_for_testing,
|
||||||
|
};
|
||||||
|
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||||
|
use crate::proxy::shared_state::ProxySharedState;
|
||||||
|
use crate::{
|
||||||
|
config::{ProxyConfig, UpstreamConfig, UpstreamType},
|
||||||
|
crypto::SecureRandom,
|
||||||
|
ip_tracker::UserIpTracker,
|
||||||
|
stats::{ReplayChecker, Stats, beobachten::BeobachtenStore},
|
||||||
|
stream::BufferPool,
|
||||||
|
transport::UpstreamManager,
|
||||||
|
};
|
||||||
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
use tokio::io::{AsyncWriteExt, duplex};
|
||||||
|
use tokio::sync::Barrier;
|
||||||
|
|
||||||
|
struct ClientHarness {
|
||||||
|
config: Arc<ProxyConfig>,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
replay_checker: Arc<ReplayChecker>,
|
||||||
|
buffer_pool: Arc<BufferPool>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_client_harness() -> ClientHarness {
|
||||||
|
let mut cfg = ProxyConfig::default();
|
||||||
|
cfg.censorship.mask = false;
|
||||||
|
cfg.general.modes.classic = true;
|
||||||
|
cfg.general.modes.secure = true;
|
||||||
|
let config = Arc::new(cfg);
|
||||||
|
let stats = Arc::new(Stats::new());
|
||||||
|
|
||||||
|
let upstream_manager = Arc::new(UpstreamManager::new(
|
||||||
|
vec![UpstreamConfig {
|
||||||
|
upstream_type: UpstreamType::Direct {
|
||||||
|
interface: None,
|
||||||
|
bind_addresses: None,
|
||||||
|
},
|
||||||
|
weight: 1,
|
||||||
|
enabled: true,
|
||||||
|
scopes: String::new(),
|
||||||
|
selected_scope: String::new(),
|
||||||
|
}],
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
10,
|
||||||
|
1,
|
||||||
|
false,
|
||||||
|
stats.clone(),
|
||||||
|
));
|
||||||
|
|
||||||
|
ClientHarness {
|
||||||
|
config,
|
||||||
|
stats,
|
||||||
|
upstream_manager,
|
||||||
|
replay_checker: Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||||
|
buffer_pool: Arc::new(BufferPool::new()),
|
||||||
|
rng: Arc::new(SecureRandom::new()),
|
||||||
|
route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||||
|
ip_tracker: Arc::new(UserIpTracker::new()),
|
||||||
|
beobachten: Arc::new(BeobachtenStore::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn drive_invalid_mtproto_handshake(shared: Arc<ProxySharedState>, peer: std::net::SocketAddr) {
|
||||||
|
let harness = new_client_harness();
|
||||||
|
let (server_side, mut client_side) = duplex(4096);
|
||||||
|
let invalid = [0u8; 64];
|
||||||
|
|
||||||
|
let task = tokio::spawn(handle_client_stream_with_shared(
|
||||||
|
server_side,
|
||||||
|
peer,
|
||||||
|
harness.config,
|
||||||
|
harness.stats,
|
||||||
|
harness.upstream_manager,
|
||||||
|
harness.replay_checker,
|
||||||
|
harness.buffer_pool,
|
||||||
|
harness.rng,
|
||||||
|
None,
|
||||||
|
harness.route_runtime,
|
||||||
|
None,
|
||||||
|
harness.ip_tracker,
|
||||||
|
harness.beobachten,
|
||||||
|
shared,
|
||||||
|
false,
|
||||||
|
));
|
||||||
|
|
||||||
|
client_side
|
||||||
|
.write_all(&invalid)
|
||||||
|
.await
|
||||||
|
.expect("failed to write invalid handshake");
|
||||||
|
client_side.shutdown().await.expect("failed to shutdown client");
|
||||||
|
let _ = tokio::time::timeout(Duration::from_secs(3), task)
|
||||||
|
.await
|
||||||
|
.expect("client task timed out")
|
||||||
|
.expect("client task join failed");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_two_instances_do_not_share_auth_probe_state() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(a.as_ref());
|
||||||
|
|
||||||
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 10));
|
||||||
|
auth_probe_record_failure_for_testing(a.as_ref(), ip, Instant::now());
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip),
|
||||||
|
Some(1)
|
||||||
|
);
|
||||||
|
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip), None);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_two_instances_do_not_share_desync_dedup() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_desync_dedup_for_testing_in_shared(a.as_ref());
|
||||||
|
|
||||||
|
let now = Instant::now();
|
||||||
|
let key = 0xA5A5_u64;
|
||||||
|
assert!(should_emit_full_desync_for_testing(a.as_ref(), key, false, now));
|
||||||
|
assert!(should_emit_full_desync_for_testing(b.as_ref(), key, false, now));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_two_instances_do_not_share_idle_registry() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref());
|
||||||
|
|
||||||
|
assert!(mark_relay_idle_candidate_for_testing(a.as_ref(), 111));
|
||||||
|
assert_eq!(oldest_relay_idle_candidate_for_testing(a.as_ref()), Some(111));
|
||||||
|
assert_eq!(oldest_relay_idle_candidate_for_testing(b.as_ref()), None);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_reset_in_one_instance_does_not_affect_another() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(a.as_ref());
|
||||||
|
|
||||||
|
let ip_a = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 1));
|
||||||
|
let ip_b = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 2));
|
||||||
|
let now = Instant::now();
|
||||||
|
|
||||||
|
auth_probe_record_failure_for_testing(a.as_ref(), ip_a, now);
|
||||||
|
auth_probe_record_failure_for_testing(b.as_ref(), ip_b, now);
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(a.as_ref());
|
||||||
|
|
||||||
|
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip_a), None);
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip_b),
|
||||||
|
Some(1)
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_parallel_auth_probe_updates_stay_per_instance() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(a.as_ref());
|
||||||
|
|
||||||
|
let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 77));
|
||||||
|
let now = Instant::now();
|
||||||
|
|
||||||
|
for _ in 0..5 {
|
||||||
|
auth_probe_record_failure_for_testing(a.as_ref(), ip, now);
|
||||||
|
}
|
||||||
|
for _ in 0..3 {
|
||||||
|
auth_probe_record_failure_for_testing(b.as_ref(), ip, now + Duration::from_millis(1));
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip), Some(5));
|
||||||
|
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip), Some(3));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn proxy_shared_state_client_pipeline_records_probe_failures_in_instance_state() {
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
let peer_ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 200));
|
||||||
|
let peer = std::net::SocketAddr::new(peer_ip, 54001);
|
||||||
|
|
||||||
|
drive_invalid_mtproto_handshake(shared.clone(), peer).await;
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer_ip),
|
||||||
|
Some(1),
|
||||||
|
"invalid handshake in client pipeline must update injected shared auth-probe state"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn proxy_shared_state_client_pipeline_keeps_auth_probe_isolated_between_instances() {
|
||||||
|
let shared_a = ProxySharedState::new();
|
||||||
|
let shared_b = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(shared_b.as_ref());
|
||||||
|
|
||||||
|
let peer_a_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 210));
|
||||||
|
let peer_b_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 211));
|
||||||
|
|
||||||
|
drive_invalid_mtproto_handshake(
|
||||||
|
shared_a.clone(),
|
||||||
|
std::net::SocketAddr::new(peer_a_ip, 54110),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
drive_invalid_mtproto_handshake(
|
||||||
|
shared_b.clone(),
|
||||||
|
std::net::SocketAddr::new(peer_b_ip, 54111),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), peer_a_ip),
|
||||||
|
Some(1)
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), peer_b_ip),
|
||||||
|
Some(1)
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), peer_b_ip),
|
||||||
|
None
|
||||||
|
);
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), peer_a_ip),
|
||||||
|
None
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
|
async fn proxy_shared_state_client_pipeline_high_contention_same_ip_stays_lossless_per_instance() {
|
||||||
|
let shared_a = ProxySharedState::new();
|
||||||
|
let shared_b = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(shared_b.as_ref());
|
||||||
|
|
||||||
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 250));
|
||||||
|
let workers = 48u16;
|
||||||
|
let barrier = Arc::new(Barrier::new((workers as usize) * 2));
|
||||||
|
let mut tasks = Vec::new();
|
||||||
|
|
||||||
|
for i in 0..workers {
|
||||||
|
let shared_a = shared_a.clone();
|
||||||
|
let barrier_a = barrier.clone();
|
||||||
|
let peer_a = std::net::SocketAddr::new(ip, 56000 + i);
|
||||||
|
tasks.push(tokio::spawn(async move {
|
||||||
|
barrier_a.wait().await;
|
||||||
|
drive_invalid_mtproto_handshake(shared_a, peer_a).await;
|
||||||
|
}));
|
||||||
|
|
||||||
|
let shared_b = shared_b.clone();
|
||||||
|
let barrier_b = barrier.clone();
|
||||||
|
let peer_b = std::net::SocketAddr::new(ip, 56100 + i);
|
||||||
|
tasks.push(tokio::spawn(async move {
|
||||||
|
barrier_b.wait().await;
|
||||||
|
drive_invalid_mtproto_handshake(shared_b, peer_b).await;
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
for task in tasks {
|
||||||
|
task.await.expect("pipeline task join failed");
|
||||||
|
}
|
||||||
|
|
||||||
|
let streak_a = auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip)
|
||||||
|
.expect("instance A must track probe failures");
|
||||||
|
let streak_b = auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip)
|
||||||
|
.expect("instance B must track probe failures");
|
||||||
|
|
||||||
|
assert!(streak_a > 0);
|
||||||
|
assert!(streak_b > 0);
|
||||||
|
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip),
|
||||||
|
None,
|
||||||
|
"clearing one instance must reset only that instance"
|
||||||
|
);
|
||||||
|
assert!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip).is_some(),
|
||||||
|
"clearing one instance must not clear the other instance"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_auth_saturation_does_not_bleed_across_instances() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 77));
|
||||||
|
let future_now = Instant::now() + Duration::from_secs(1);
|
||||||
|
for _ in 0..8 {
|
||||||
|
auth_probe_record_failure_for_testing(a.as_ref(), ip, future_now);
|
||||||
|
}
|
||||||
|
|
||||||
|
assert!(auth_probe_is_throttled_for_testing_in_shared(a.as_ref(), ip));
|
||||||
|
assert!(!auth_probe_is_throttled_for_testing_in_shared(b.as_ref(), ip));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_poison_clear_in_one_instance_does_not_affect_other_instance() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
let ip_a = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 31));
|
||||||
|
let ip_b = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 32));
|
||||||
|
let now = Instant::now();
|
||||||
|
|
||||||
|
auth_probe_record_failure_for_testing(a.as_ref(), ip_a, now);
|
||||||
|
auth_probe_record_failure_for_testing(b.as_ref(), ip_b, now);
|
||||||
|
|
||||||
|
let a_for_poison = a.clone();
|
||||||
|
let _ = std::thread::spawn(move || {
|
||||||
|
let _hold = a_for_poison
|
||||||
|
.handshake
|
||||||
|
.auth_probe_saturation
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
panic!("intentional poison for per-instance isolation regression coverage");
|
||||||
|
})
|
||||||
|
.join();
|
||||||
|
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(a.as_ref());
|
||||||
|
|
||||||
|
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip_a), None);
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip_b),
|
||||||
|
Some(1),
|
||||||
|
"poison recovery and clear in one instance must not touch other instance state"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_unknown_sni_cooldown_does_not_bleed_across_instances() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_unknown_sni_warn_state_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_unknown_sni_warn_state_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
let now = Instant::now();
|
||||||
|
assert!(should_emit_unknown_sni_warn_for_testing_in_shared(
|
||||||
|
a.as_ref(),
|
||||||
|
now
|
||||||
|
));
|
||||||
|
assert!(should_emit_unknown_sni_warn_for_testing_in_shared(
|
||||||
|
b.as_ref(),
|
||||||
|
now
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_warned_secret_cache_does_not_bleed_across_instances() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_warned_secrets_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_warned_secrets_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
let key = ("isolation-user".to_string(), "invalid_hex".to_string());
|
||||||
|
{
|
||||||
|
let warned = warned_secrets_for_testing_in_shared(a.as_ref());
|
||||||
|
let mut guard = warned
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
guard.insert(key.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
let contains_in_a = {
|
||||||
|
let warned = warned_secrets_for_testing_in_shared(a.as_ref());
|
||||||
|
let guard = warned
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
guard.contains(&key)
|
||||||
|
};
|
||||||
|
let contains_in_b = {
|
||||||
|
let warned = warned_secrets_for_testing_in_shared(b.as_ref());
|
||||||
|
let guard = warned
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
guard.contains(&key)
|
||||||
|
};
|
||||||
|
|
||||||
|
assert!(contains_in_a);
|
||||||
|
assert!(!contains_in_b);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_idle_mark_seq_is_per_instance() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
assert_eq!(relay_idle_mark_seq_for_testing(a.as_ref()), 0);
|
||||||
|
assert_eq!(relay_idle_mark_seq_for_testing(b.as_ref()), 0);
|
||||||
|
|
||||||
|
assert!(mark_relay_idle_candidate_for_testing(a.as_ref(), 9001));
|
||||||
|
assert_eq!(relay_idle_mark_seq_for_testing(a.as_ref()), 1);
|
||||||
|
assert_eq!(relay_idle_mark_seq_for_testing(b.as_ref()), 0);
|
||||||
|
|
||||||
|
assert!(mark_relay_idle_candidate_for_testing(b.as_ref(), 9002));
|
||||||
|
assert_eq!(relay_idle_mark_seq_for_testing(a.as_ref()), 1);
|
||||||
|
assert_eq!(relay_idle_mark_seq_for_testing(b.as_ref()), 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_unknown_sni_clear_in_one_instance_does_not_reset_other() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_unknown_sni_warn_state_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_unknown_sni_warn_state_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
let now = Instant::now();
|
||||||
|
assert!(should_emit_unknown_sni_warn_for_testing_in_shared(
|
||||||
|
a.as_ref(),
|
||||||
|
now
|
||||||
|
));
|
||||||
|
assert!(should_emit_unknown_sni_warn_for_testing_in_shared(
|
||||||
|
b.as_ref(),
|
||||||
|
now
|
||||||
|
));
|
||||||
|
|
||||||
|
clear_unknown_sni_warn_state_for_testing_in_shared(a.as_ref());
|
||||||
|
assert!(should_emit_unknown_sni_warn_for_testing_in_shared(
|
||||||
|
a.as_ref(),
|
||||||
|
now + Duration::from_millis(1)
|
||||||
|
));
|
||||||
|
assert!(!should_emit_unknown_sni_warn_for_testing_in_shared(
|
||||||
|
b.as_ref(),
|
||||||
|
now + Duration::from_millis(1)
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_warned_secret_clear_in_one_instance_does_not_clear_other() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_warned_secrets_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_warned_secrets_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
let key = ("clear-isolation-user".to_string(), "invalid_length".to_string());
|
||||||
|
{
|
||||||
|
let warned_a = warned_secrets_for_testing_in_shared(a.as_ref());
|
||||||
|
let mut guard_a = warned_a
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
guard_a.insert(key.clone());
|
||||||
|
|
||||||
|
let warned_b = warned_secrets_for_testing_in_shared(b.as_ref());
|
||||||
|
let mut guard_b = warned_b
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
guard_b.insert(key.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
clear_warned_secrets_for_testing_in_shared(a.as_ref());
|
||||||
|
|
||||||
|
let has_a = {
|
||||||
|
let warned = warned_secrets_for_testing_in_shared(a.as_ref());
|
||||||
|
let guard = warned
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
guard.contains(&key)
|
||||||
|
};
|
||||||
|
let has_b = {
|
||||||
|
let warned = warned_secrets_for_testing_in_shared(b.as_ref());
|
||||||
|
let guard = warned
|
||||||
|
.lock()
|
||||||
|
.unwrap_or_else(|poisoned| poisoned.into_inner());
|
||||||
|
guard.contains(&key)
|
||||||
|
};
|
||||||
|
|
||||||
|
assert!(!has_a);
|
||||||
|
assert!(has_b);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_desync_duplicate_suppression_is_instance_scoped() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_desync_dedup_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_desync_dedup_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
let now = Instant::now();
|
||||||
|
let key = 0xBEEF_0000_0000_0001u64;
|
||||||
|
assert!(should_emit_full_desync_for_testing(a.as_ref(), key, false, now));
|
||||||
|
assert!(!should_emit_full_desync_for_testing(
|
||||||
|
a.as_ref(),
|
||||||
|
key,
|
||||||
|
false,
|
||||||
|
now + Duration::from_millis(1)
|
||||||
|
));
|
||||||
|
assert!(should_emit_full_desync_for_testing(b.as_ref(), key, false, now));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_desync_clear_in_one_instance_does_not_clear_other() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_desync_dedup_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_desync_dedup_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
let now = Instant::now();
|
||||||
|
let key = 0xCAFE_0000_0000_0001u64;
|
||||||
|
assert!(should_emit_full_desync_for_testing(a.as_ref(), key, false, now));
|
||||||
|
assert!(should_emit_full_desync_for_testing(b.as_ref(), key, false, now));
|
||||||
|
|
||||||
|
clear_desync_dedup_for_testing_in_shared(a.as_ref());
|
||||||
|
|
||||||
|
assert!(should_emit_full_desync_for_testing(
|
||||||
|
a.as_ref(),
|
||||||
|
key,
|
||||||
|
false,
|
||||||
|
now + Duration::from_millis(2)
|
||||||
|
));
|
||||||
|
assert!(!should_emit_full_desync_for_testing(
|
||||||
|
b.as_ref(),
|
||||||
|
key,
|
||||||
|
false,
|
||||||
|
now + Duration::from_millis(2)
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_idle_candidate_clear_in_one_instance_does_not_affect_other() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
assert!(mark_relay_idle_candidate_for_testing(a.as_ref(), 1001));
|
||||||
|
assert!(mark_relay_idle_candidate_for_testing(b.as_ref(), 2002));
|
||||||
|
clear_relay_idle_candidate_for_testing(a.as_ref(), 1001);
|
||||||
|
|
||||||
|
assert_eq!(oldest_relay_idle_candidate_for_testing(a.as_ref()), None);
|
||||||
|
assert_eq!(oldest_relay_idle_candidate_for_testing(b.as_ref()), Some(2002));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_pressure_seq_increments_are_instance_scoped() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
assert_eq!(relay_pressure_event_seq_for_testing(a.as_ref()), 0);
|
||||||
|
assert_eq!(relay_pressure_event_seq_for_testing(b.as_ref()), 0);
|
||||||
|
|
||||||
|
note_relay_pressure_event_for_testing(a.as_ref());
|
||||||
|
note_relay_pressure_event_for_testing(a.as_ref());
|
||||||
|
|
||||||
|
assert_eq!(relay_pressure_event_seq_for_testing(a.as_ref()), 2);
|
||||||
|
assert_eq!(relay_pressure_event_seq_for_testing(b.as_ref()), 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn proxy_shared_state_pressure_consumption_does_not_cross_instances() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
assert!(mark_relay_idle_candidate_for_testing(a.as_ref(), 7001));
|
||||||
|
assert!(mark_relay_idle_candidate_for_testing(b.as_ref(), 7001));
|
||||||
|
note_relay_pressure_event_for_testing(a.as_ref());
|
||||||
|
|
||||||
|
let stats = Stats::new();
|
||||||
|
let mut seen_a = 0u64;
|
||||||
|
let mut seen_b = 0u64;
|
||||||
|
|
||||||
|
assert!(maybe_evict_idle_candidate_on_pressure_for_testing(
|
||||||
|
a.as_ref(),
|
||||||
|
7001,
|
||||||
|
&mut seen_a,
|
||||||
|
&stats
|
||||||
|
));
|
||||||
|
assert!(!maybe_evict_idle_candidate_on_pressure_for_testing(
|
||||||
|
b.as_ref(),
|
||||||
|
7001,
|
||||||
|
&mut seen_b,
|
||||||
|
&stats
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,255 @@
|
||||||
|
use crate::proxy::handshake::{
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared, auth_probe_record_failure_for_testing,
|
||||||
|
clear_auth_probe_state_for_testing_in_shared, clear_unknown_sni_warn_state_for_testing_in_shared,
|
||||||
|
should_emit_unknown_sni_warn_for_testing_in_shared,
|
||||||
|
};
|
||||||
|
use crate::proxy::middle_relay::{
|
||||||
|
clear_desync_dedup_for_testing_in_shared, clear_relay_idle_pressure_state_for_testing_in_shared,
|
||||||
|
mark_relay_idle_candidate_for_testing, oldest_relay_idle_candidate_for_testing,
|
||||||
|
should_emit_full_desync_for_testing,
|
||||||
|
};
|
||||||
|
use crate::proxy::shared_state::ProxySharedState;
|
||||||
|
use rand::SeedableRng;
|
||||||
|
use rand::RngExt;
|
||||||
|
use rand::rngs::StdRng;
|
||||||
|
use std::net::{IpAddr, Ipv4Addr};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Instant;
|
||||||
|
use tokio::sync::Barrier;
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
|
async fn proxy_shared_state_50_concurrent_instances_no_counter_bleed() {
|
||||||
|
let mut handles = Vec::new();
|
||||||
|
for i in 0..50_u8 {
|
||||||
|
handles.push(tokio::spawn(async move {
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 200));
|
||||||
|
auth_probe_record_failure_for_testing(shared.as_ref(), ip, Instant::now());
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip)
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
for handle in handles {
|
||||||
|
let streak = handle.await.expect("task join failed");
|
||||||
|
assert_eq!(streak, Some(1));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
|
async fn proxy_shared_state_desync_rotation_concurrent_20_instances() {
|
||||||
|
let now = Instant::now();
|
||||||
|
let key = 0xD35E_D35E_u64;
|
||||||
|
let mut handles = Vec::new();
|
||||||
|
for _ in 0..20_u64 {
|
||||||
|
handles.push(tokio::spawn(async move {
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
clear_desync_dedup_for_testing_in_shared(shared.as_ref());
|
||||||
|
should_emit_full_desync_for_testing(shared.as_ref(), key, false, now)
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
for handle in handles {
|
||||||
|
let emitted = handle.await.expect("task join failed");
|
||||||
|
assert!(emitted);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
|
async fn proxy_shared_state_idle_registry_concurrent_10_instances() {
|
||||||
|
let mut handles = Vec::new();
|
||||||
|
let conn_id = 42_u64;
|
||||||
|
for _ in 1..=10_u64 {
|
||||||
|
handles.push(tokio::spawn(async move {
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
clear_relay_idle_pressure_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
let marked = mark_relay_idle_candidate_for_testing(shared.as_ref(), conn_id);
|
||||||
|
let oldest = oldest_relay_idle_candidate_for_testing(shared.as_ref());
|
||||||
|
(marked, oldest)
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (i, handle) in handles.into_iter().enumerate() {
|
||||||
|
let (marked, oldest) = handle.await.expect("task join failed");
|
||||||
|
assert!(marked, "instance {} failed to mark", i);
|
||||||
|
assert_eq!(oldest, Some(conn_id));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
|
async fn proxy_shared_state_dual_instance_same_ip_high_contention_no_counter_bleed() {
|
||||||
|
let a = ProxySharedState::new();
|
||||||
|
let b = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(a.as_ref());
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(b.as_ref());
|
||||||
|
|
||||||
|
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 200));
|
||||||
|
let mut handles = Vec::new();
|
||||||
|
|
||||||
|
for _ in 0..64 {
|
||||||
|
let a = a.clone();
|
||||||
|
let b = b.clone();
|
||||||
|
handles.push(tokio::spawn(async move {
|
||||||
|
auth_probe_record_failure_for_testing(a.as_ref(), ip, Instant::now());
|
||||||
|
auth_probe_record_failure_for_testing(b.as_ref(), ip, Instant::now());
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
for handle in handles {
|
||||||
|
handle.await.expect("task join failed");
|
||||||
|
}
|
||||||
|
|
||||||
|
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(a.as_ref(), ip), Some(64));
|
||||||
|
assert_eq!(auth_probe_fail_streak_for_testing_in_shared(b.as_ref(), ip), Some(64));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
|
async fn proxy_shared_state_unknown_sni_parallel_instances_no_cross_cooldown() {
|
||||||
|
let mut handles = Vec::new();
|
||||||
|
let now = Instant::now();
|
||||||
|
|
||||||
|
for _ in 0..32 {
|
||||||
|
handles.push(tokio::spawn(async move {
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
clear_unknown_sni_warn_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
let first = should_emit_unknown_sni_warn_for_testing_in_shared(shared.as_ref(), now);
|
||||||
|
let second = should_emit_unknown_sni_warn_for_testing_in_shared(
|
||||||
|
shared.as_ref(),
|
||||||
|
now + std::time::Duration::from_millis(1),
|
||||||
|
);
|
||||||
|
(first, second)
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
for handle in handles {
|
||||||
|
let (first, second) = handle.await.expect("task join failed");
|
||||||
|
assert!(first);
|
||||||
|
assert!(!second);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||||
|
async fn proxy_shared_state_auth_probe_high_contention_increments_are_lossless() {
|
||||||
|
let shared = ProxySharedState::new();
|
||||||
|
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||||
|
|
||||||
|
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 33));
|
||||||
|
let workers = 128usize;
|
||||||
|
let rounds = 20usize;
|
||||||
|
|
||||||
|
for _ in 0..rounds {
|
||||||
|
let start = Arc::new(Barrier::new(workers));
|
||||||
|
let mut handles = Vec::with_capacity(workers);
|
||||||
|
|
||||||
|
for _ in 0..workers {
|
||||||
|
let shared = shared.clone();
|
||||||
|
let start = start.clone();
|
||||||
|
handles.push(tokio::spawn(async move {
|
||||||
|
start.wait().await;
|
||||||
|
auth_probe_record_failure_for_testing(shared.as_ref(), ip, Instant::now());
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
for handle in handles {
|
||||||
|
handle.await.expect("task join failed");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let expected = (workers * rounds) as u32;
|
||||||
|
assert_eq!(
|
||||||
|
auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip),
|
||||||
|
Some(expected),
|
||||||
|
"auth probe fail streak must account for every concurrent update"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Randomized matrix test over fixed seeds: two independent
/// `ProxySharedState` instances receive interleaved, barrier-released
/// concurrent auth-probe failures. Verifies that each instance's fail
/// streak equals exactly the number of failures recorded against it, and
/// that clearing one instance resets only that instance.
///
/// NOTE: the expected counters (`expected_a` / `expected_b`) are
/// accumulated from the same RNG draws that drive the workers, so the
/// draw order below must not be disturbed.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn proxy_shared_state_seed_matrix_concurrency_isolation_no_counter_bleed() {
    // Fixed seeds so every run exercises the same schedule of op counts.
    let seeds: [u64; 8] = [
        0x0000_0000_0000_0001,
        0x1111_1111_1111_1111,
        0xA5A5_A5A5_A5A5_A5A5,
        0xDEAD_BEEF_CAFE_BABE,
        0x0123_4567_89AB_CDEF,
        0xFEDC_BA98_7654_3210,
        0x0F0F_F0F0_55AA_AA55,
        0x1357_9BDF_2468_ACE0,
    ];

    for seed in seeds {
        let mut rng = StdRng::seed_from_u64(seed);
        let shared_a = ProxySharedState::new();
        let shared_b = ProxySharedState::new();
        clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
        clear_auth_probe_state_for_testing_in_shared(shared_b.as_ref());

        // TEST-NET-2 address with a seed-derived last octet.
        let ip = IpAddr::V4(Ipv4Addr::new(
            198,
            51,
            100,
            rng.random_range(1_u8..=250_u8),
        ));
        // Seed-derived contention shape.
        let workers = rng.random_range(16_usize..=48_usize);
        let rounds = rng.random_range(4_usize..=10_usize);

        // Ground-truth counters: total ops issued against each instance.
        let mut expected_a: u32 = 0;
        let mut expected_b: u32 = 0;

        for _ in 0..rounds {
            // One barrier gates both the A-side and B-side task of every worker,
            // hence `workers * 2` participants.
            let start = Arc::new(Barrier::new(workers * 2));
            let mut handles = Vec::with_capacity(workers * 2);

            for _ in 0..workers {
                // Per-worker op counts are drawn up front so the expected
                // totals are known before any task runs.
                let a_ops = rng.random_range(1_u32..=3_u32);
                let b_ops = rng.random_range(1_u32..=3_u32);
                expected_a = expected_a.saturating_add(a_ops);
                expected_b = expected_b.saturating_add(b_ops);

                let shared_a = shared_a.clone();
                let start_a = start.clone();
                handles.push(tokio::spawn(async move {
                    start_a.wait().await;
                    for _ in 0..a_ops {
                        auth_probe_record_failure_for_testing(shared_a.as_ref(), ip, Instant::now());
                    }
                }));

                let shared_b = shared_b.clone();
                let start_b = start.clone();
                handles.push(tokio::spawn(async move {
                    start_b.wait().await;
                    for _ in 0..b_ops {
                        auth_probe_record_failure_for_testing(shared_b.as_ref(), ip, Instant::now());
                    }
                }));
            }

            for handle in handles {
                handle.await.expect("task join failed");
            }
        }

        // Lossless accounting per instance.
        assert_eq!(
            auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip),
            Some(expected_a),
            "seed {seed:#x}: instance A streak mismatch"
        );
        assert_eq!(
            auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip),
            Some(expected_b),
            "seed {seed:#x}: instance B streak mismatch"
        );

        // Isolation: clearing A must reset A only and leave B intact.
        clear_auth_probe_state_for_testing_in_shared(shared_a.as_ref());
        assert_eq!(
            auth_probe_fail_streak_for_testing_in_shared(shared_a.as_ref(), ip),
            None,
            "seed {seed:#x}: clearing A must reset only A"
        );
        assert_eq!(
            auth_probe_fail_streak_for_testing_in_shared(shared_b.as_ref(), ip),
            Some(expected_b),
            "seed {seed:#x}: clearing A must not mutate B"
        );
    }
}
|
||||||
Loading…
Reference in New Issue