Add security tests for middle relay idle policy and enhance stats tracking

- Introduced a new test module for middle relay idle policy security tests, covering various scenarios including soft mark, hard close, and grace periods.
- Implemented functions to create crypto readers and encrypt data for testing.
- Enhanced the Stats struct to include counters for relay idle soft marks, hard closes, pressure evictions, and protocol desync closes.
- Added corresponding increment and retrieval methods for the new stats fields.
This commit is contained in:
David Osipov
2026-03-20 16:43:50 +04:00
parent 5c5fdcb124
commit 512bee6a8d
8 changed files with 1571 additions and 18 deletions

View File

@@ -1,4 +1,5 @@
use std::collections::hash_map::RandomState;
use std::collections::{BTreeSet, HashMap};
use std::hash::BuildHasher;
use std::hash::{Hash, Hasher};
use std::net::{IpAddr, SocketAddr};
@@ -10,7 +11,7 @@ use dashmap::DashMap;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use tokio::sync::{mpsc, oneshot, watch, Mutex as AsyncMutex};
use tokio::time::timeout;
use tracing::{debug, trace, warn};
use tracing::{debug, info, trace, warn};
use crate::config::ProxyConfig;
use crate::crypto::SecureRandom;
@@ -38,6 +39,7 @@ const DESYNC_ERROR_CLASS: &str = "frame_too_large_crypto_desync";
const C2ME_CHANNEL_CAPACITY_FALLBACK: usize = 128;
const C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS: usize = 64;
const C2ME_SENDER_FAIRNESS_BUDGET: usize = 32;
const RELAY_IDLE_IO_POLL_MAX: Duration = Duration::from_secs(1);
#[cfg(test)]
const C2ME_SEND_TIMEOUT: Duration = Duration::from_millis(50);
#[cfg(not(test))]
@@ -53,6 +55,8 @@ static DESYNC_HASHER: OnceLock<RandomState> = OnceLock::new();
static DESYNC_FULL_CACHE_LAST_EMIT_AT: OnceLock<Mutex<Option<Instant>>> = OnceLock::new();
static DESYNC_DEDUP_EVER_SATURATED: OnceLock<AtomicBool> = OnceLock::new();
static QUOTA_USER_LOCKS: OnceLock<DashMap<String, Arc<AsyncMutex<()>>>> = OnceLock::new();
static RELAY_IDLE_CANDIDATE_REGISTRY: OnceLock<Mutex<RelayIdleCandidateRegistry>> = OnceLock::new();
static RELAY_IDLE_MARK_SEQ: AtomicU64 = AtomicU64::new(0);
struct RelayForensicsState {
trace_id: u64,
@@ -66,6 +70,140 @@ struct RelayForensicsState {
desync_all_full: bool,
}
/// Process-wide index of middle-relay sessions that have been soft-marked as
/// idle-eviction candidates, kept in two views: a per-connection metadata map
/// and an ordering set keyed by (mark sequence, conn id) so the oldest mark
/// is cheap to find.
#[derive(Default)]
struct RelayIdleCandidateRegistry {
    // conn_id -> mark metadata for currently soft-marked candidates.
    by_conn_id: HashMap<u64, RelayIdleCandidateMeta>,
    // (mark_order_seq, conn_id) pairs; the smallest element is the oldest mark.
    ordered: BTreeSet<(u64, u64)>,
    // Wrapping counter bumped once per observed pressure event.
    pressure_event_seq: u64,
    // Last pressure event that was consumed (evicted a candidate, or was
    // retired with no candidates present).
    pressure_consumed_seq: u64,
}
/// Per-candidate bookkeeping recorded when a session is soft-marked as idle.
#[derive(Clone, Copy)]
struct RelayIdleCandidateMeta {
    // Global mark sequence; also the first element of the `ordered` set key.
    mark_order_seq: u64,
    // Registry `pressure_event_seq` observed at mark time; used to treat
    // pressure events that predate the mark as stale for this candidate.
    mark_pressure_seq: u64,
}
/// Lazily-initialized accessor for the global idle-candidate registry.
fn relay_idle_candidate_registry() -> &'static Mutex<RelayIdleCandidateRegistry> {
    // `Mutex<T: Default>` implements `Default`, so the closure is just the
    // derived default constructor.
    RELAY_IDLE_CANDIDATE_REGISTRY.get_or_init(Default::default)
}
/// Soft-mark a connection as an idle-eviction candidate.
///
/// Returns `true` only on a fresh mark; an already-marked connection (or a
/// poisoned registry lock) yields `false`. The current pressure sequence is
/// captured so pressure events that predate the mark cannot evict it.
fn mark_relay_idle_candidate(conn_id: u64) -> bool {
    let mut registry = match relay_idle_candidate_registry().lock() {
        Ok(guard) => guard,
        Err(_) => return false,
    };
    if registry.by_conn_id.contains_key(&conn_id) {
        return false;
    }
    // `fetch_add` returns the previous value; +1 yields this mark's sequence.
    let order_seq = RELAY_IDLE_MARK_SEQ
        .fetch_add(1, Ordering::Relaxed)
        .saturating_add(1);
    let meta = RelayIdleCandidateMeta {
        mark_order_seq: order_seq,
        mark_pressure_seq: registry.pressure_event_seq,
    };
    registry.ordered.insert((order_seq, conn_id));
    registry.by_conn_id.insert(conn_id, meta);
    true
}
/// Remove a connection from both registry views, if it is currently marked.
/// A poisoned lock is treated as a no-op.
fn clear_relay_idle_candidate(conn_id: u64) {
    if let Ok(mut registry) = relay_idle_candidate_registry().lock() {
        let removed = registry.by_conn_id.remove(&conn_id);
        if let Some(meta) = removed {
            registry.ordered.remove(&(meta.mark_order_seq, conn_id));
        }
    }
}
/// Test helper: conn id of the oldest soft-marked idle candidate, if any.
#[cfg(test)]
fn oldest_relay_idle_candidate() -> Option<u64> {
    let registry = relay_idle_candidate_registry().lock().ok()?;
    registry.ordered.first().map(|&(_, conn_id)| conn_id)
}
/// Record one middle-relay pressure event (wrapping counter bump); a
/// poisoned lock silently drops the event.
fn note_relay_pressure_event() {
    if let Ok(mut registry) = relay_idle_candidate_registry().lock() {
        registry.pressure_event_seq = registry.pressure_event_seq.wrapping_add(1);
    }
}
/// Snapshot of the current pressure-event sequence (`0` on a poisoned lock).
fn relay_pressure_event_seq() -> u64 {
    relay_idle_candidate_registry()
        .lock()
        .map(|registry| registry.pressure_event_seq)
        .unwrap_or(0)
}
/// On a new middle-relay pressure event, evict this session if it is the
/// oldest soft-idle candidate.
///
/// Each relay session task polls this with its own `seen_pressure_seq`
/// cursor; the registry-wide `pressure_consumed_seq` ensures a single
/// pressure event evicts at most one candidate. Returns `true` when the
/// calling session (`conn_id`) was selected, removed from the registry, and
/// counted — the caller is then expected to close the session.
fn maybe_evict_idle_candidate_on_pressure(
    conn_id: u64,
    seen_pressure_seq: &mut u64,
    stats: &Stats,
) -> bool {
    // A poisoned registry lock disables eviction rather than panicking.
    let Ok(mut guard) = relay_idle_candidate_registry().lock() else {
        return false;
    };
    let latest_pressure_seq = guard.pressure_event_seq;
    // No new pressure event since this task last looked.
    if latest_pressure_seq == *seen_pressure_seq {
        return false;
    }
    *seen_pressure_seq = latest_pressure_seq;
    // This pressure event was already consumed (by this or another session).
    if latest_pressure_seq == guard.pressure_consumed_seq {
        return false;
    }
    // No candidates: retire the event so it cannot evict a future mark.
    if guard.ordered.is_empty() {
        guard.pressure_consumed_seq = latest_pressure_seq;
        return false;
    }
    // Only the oldest-marked candidate is allowed to evict itself.
    let oldest = guard
        .ordered
        .iter()
        .next()
        .map(|(_, candidate_conn_id)| *candidate_conn_id);
    if oldest != Some(conn_id) {
        return false;
    }
    let Some(candidate_meta) = guard.by_conn_id.get(&conn_id).copied() else {
        return false;
    };
    // Pressure events that happened before candidate soft-mark are stale for this candidate.
    if latest_pressure_seq == candidate_meta.mark_pressure_seq {
        return false;
    }
    // Evict: drop from both indexes, consume the event, and count it.
    if let Some(meta) = guard.by_conn_id.remove(&conn_id) {
        guard.ordered.remove(&(meta.mark_order_seq, conn_id));
    }
    guard.pressure_consumed_seq = latest_pressure_seq;
    stats.increment_relay_pressure_evict_total();
    true
}
/// Test helper: reset all idle/pressure bookkeeping to a pristine state.
#[cfg(test)]
fn clear_relay_idle_pressure_state_for_testing() {
    if let Some(registry) = RELAY_IDLE_CANDIDATE_REGISTRY.get() {
        if let Ok(mut guard) = registry.lock() {
            // Replacing wholesale zeroes both sequences and empties both views.
            *guard = RelayIdleCandidateRegistry::default();
        }
    }
    RELAY_IDLE_MARK_SEQ.store(0, Ordering::Relaxed);
}
#[derive(Clone, Copy)]
struct MeD2cFlushPolicy {
max_frames: usize,
@@ -74,6 +212,61 @@ struct MeD2cFlushPolicy {
ack_flush_immediate: bool,
}
/// Idle-timeout policy for the middle-relay client read path, derived from
/// the proxy's timeout configuration.
#[derive(Clone, Copy)]
struct RelayClientIdlePolicy {
    // When false, reads use `legacy_frame_read_timeout` and nothing else.
    enabled: bool,
    // Client idle duration after which the session is soft-marked as an
    // eviction candidate.
    soft_idle: Duration,
    // Client idle duration after which the session is force-closed.
    hard_idle: Duration,
    // Extra allowance after downstream activity before a hard close;
    // zero disables the grace extension.
    grace_after_downstream_activity: Duration,
    // Per-read timeout used when the v2 idle policy is disabled.
    legacy_frame_read_timeout: Duration,
}
impl RelayClientIdlePolicy {
    /// Build the policy from proxy configuration.
    ///
    /// `soft_idle`, `hard_idle`, and the legacy frame-read timeout are
    /// clamped to at least one second; the post-downstream-activity grace
    /// window is taken as configured (zero disables it).
    fn from_config(config: &ProxyConfig) -> Self {
        Self {
            enabled: config.timeouts.relay_idle_policy_v2_enabled,
            soft_idle: Duration::from_secs(config.timeouts.relay_client_idle_soft_secs.max(1)),
            hard_idle: Duration::from_secs(config.timeouts.relay_client_idle_hard_secs.max(1)),
            grace_after_downstream_activity: Duration::from_secs(
                config
                    .timeouts
                    .relay_idle_grace_after_downstream_activity_secs,
            ),
            // Fallback read timeout reuses the client handshake timeout.
            legacy_frame_read_timeout: Duration::from_secs(config.timeouts.client_handshake.max(1)),
        }
    }

    /// Test-only policy with the v2 idle machinery turned off; every read
    /// then falls back to `frame_read_timeout`, matching legacy behavior.
    #[cfg(test)]
    fn disabled(frame_read_timeout: Duration) -> Self {
        Self {
            enabled: false,
            soft_idle: Duration::from_secs(0),
            hard_idle: Duration::from_secs(0),
            grace_after_downstream_activity: Duration::from_secs(0),
            legacy_frame_read_timeout: frame_read_timeout,
        }
    }
}
/// Mutable idle-tracking state for one relay session's client reader.
struct RelayClientIdleState {
    // Instant of the most recent complete client frame (session start until
    // the first frame arrives).
    last_client_frame_at: Instant,
    // True once the soft-idle threshold fired for the current idle stretch;
    // reset by the next client frame.
    soft_idle_marked: bool,
}
impl RelayClientIdleState {
fn new(now: Instant) -> Self {
Self {
last_client_frame_at: now,
soft_idle_marked: false,
}
}
fn on_client_frame(&mut self, now: Instant) {
self.last_client_frame_at = now;
self.soft_idle_marked = false;
}
}
impl MeD2cFlushPolicy {
fn from_config(config: &ProxyConfig) -> Self {
Self {
@@ -251,6 +444,7 @@ fn report_desync_frame_too_large(
let bytes_me2c = state.bytes_me2c.load(Ordering::Relaxed);
stats.increment_desync_total();
stats.increment_relay_protocol_desync_close_total();
stats.observe_desync_frames_ok(frame_counter);
if emit_full {
stats.increment_desync_full_logged();
@@ -366,6 +560,7 @@ async fn enqueue_c2me_command(
Ok(()) => Ok(()),
Err(mpsc::error::TrySendError::Closed(cmd)) => Err(mpsc::error::SendError(cmd)),
Err(mpsc::error::TrySendError::Full(cmd)) => {
note_relay_pressure_event();
// Cooperative yield reduces burst catch-up when the per-conn queue is near saturation.
if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS {
tokio::task::yield_now().await;
@@ -483,6 +678,10 @@ where
let translated_local_addr = me_pool.translate_our_addr(local_addr);
let frame_limit = config.general.max_client_frame;
let relay_idle_policy = RelayClientIdlePolicy::from_config(&config);
let session_started_at = forensics.started_at;
let mut relay_idle_state = RelayClientIdleState::new(session_started_at);
let last_downstream_activity_ms = Arc::new(AtomicU64::new(0));
let c2me_channel_capacity = config
.general
@@ -525,6 +724,7 @@ where
let stats_clone = stats.clone();
let rng_clone = rng.clone();
let user_clone = user.clone();
let last_downstream_activity_ms_clone = last_downstream_activity_ms.clone();
let bytes_me2c_clone = bytes_me2c.clone();
let d2c_flush_policy = MeD2cFlushPolicy::from_config(&config);
let me_writer = tokio::spawn(async move {
@@ -542,6 +742,8 @@ where
let mut batch_bytes = 0usize;
let mut flush_immediately;
let first_is_downstream_activity =
matches!(&first, MeResponse::Data { .. } | MeResponse::Ack(_));
match process_me_writer_response(
first,
&mut writer,
@@ -557,6 +759,10 @@ where
false,
).await? {
MeWriterResponseOutcome::Continue { frames, bytes, flush_immediately: immediate } => {
if first_is_downstream_activity {
last_downstream_activity_ms_clone
.store(session_started_at.elapsed().as_millis() as u64, Ordering::Relaxed);
}
batch_frames = batch_frames.saturating_add(frames);
batch_bytes = batch_bytes.saturating_add(bytes);
flush_immediately = immediate;
@@ -575,6 +781,8 @@ where
break;
};
let next_is_downstream_activity =
matches!(&next, MeResponse::Data { .. } | MeResponse::Ack(_));
match process_me_writer_response(
next,
&mut writer,
@@ -590,6 +798,10 @@ where
true,
).await? {
MeWriterResponseOutcome::Continue { frames, bytes, flush_immediately: immediate } => {
if next_is_downstream_activity {
last_downstream_activity_ms_clone
.store(session_started_at.elapsed().as_millis() as u64, Ordering::Relaxed);
}
batch_frames = batch_frames.saturating_add(frames);
batch_bytes = batch_bytes.saturating_add(bytes);
flush_immediately |= immediate;
@@ -608,6 +820,8 @@ where
{
match tokio::time::timeout(d2c_flush_policy.max_delay, me_rx_task.recv()).await {
Ok(Some(next)) => {
let next_is_downstream_activity =
matches!(&next, MeResponse::Data { .. } | MeResponse::Ack(_));
match process_me_writer_response(
next,
&mut writer,
@@ -623,6 +837,10 @@ where
true,
).await? {
MeWriterResponseOutcome::Continue { frames, bytes, flush_immediately: immediate } => {
if next_is_downstream_activity {
last_downstream_activity_ms_clone
.store(session_started_at.elapsed().as_millis() as u64, Ordering::Relaxed);
}
batch_frames = batch_frames.saturating_add(frames);
batch_bytes = batch_bytes.saturating_add(bytes);
flush_immediately |= immediate;
@@ -641,6 +859,8 @@ where
break;
};
let extra_is_downstream_activity =
matches!(&extra, MeResponse::Data { .. } | MeResponse::Ack(_));
match process_me_writer_response(
extra,
&mut writer,
@@ -656,6 +876,10 @@ where
true,
).await? {
MeWriterResponseOutcome::Continue { frames, bytes, flush_immediately: immediate } => {
if extra_is_downstream_activity {
last_downstream_activity_ms_clone
.store(session_started_at.elapsed().as_millis() as u64, Ordering::Relaxed);
}
batch_frames = batch_frames.saturating_add(frames);
batch_bytes = batch_bytes.saturating_add(bytes);
flush_immediately |= immediate;
@@ -689,7 +913,24 @@ where
let mut client_closed = false;
let mut frame_counter: u64 = 0;
let mut route_watch_open = true;
let mut seen_pressure_seq = relay_pressure_event_seq();
loop {
if relay_idle_policy.enabled
&& maybe_evict_idle_candidate_on_pressure(conn_id, &mut seen_pressure_seq, stats.as_ref())
{
info!(
conn_id,
trace_id = format_args!("0x{:016x}", trace_id),
user = %user,
"Middle-relay pressure eviction for idle-candidate session"
);
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
main_result = Err(ProxyError::Proxy(
"middle-relay session evicted under pressure (idle-candidate)".to_string(),
));
break;
}
if let Some(cutover) = affected_cutover_state(
&route_rx,
RelayRouteMode::Middle,
@@ -715,15 +956,18 @@ where
route_watch_open = false;
}
}
payload_result = read_client_payload(
payload_result = read_client_payload_with_idle_policy(
&mut crypto_reader,
proto_tag,
frame_limit,
Duration::from_secs(config.timeouts.client_handshake.max(1)),
&buffer_pool,
&forensics,
&mut frame_counter,
&stats,
&relay_idle_policy,
&mut relay_idle_state,
last_downstream_activity_ms.as_ref(),
session_started_at,
) => {
match payload_result {
Ok(Some((payload, quickack))) => {
@@ -812,46 +1056,181 @@ where
frames_ok = frame_counter,
"ME relay cleanup"
);
clear_relay_idle_candidate(conn_id);
me_pool.registry().unregister(conn_id).await;
result
}
async fn read_client_payload<R>(
async fn read_client_payload_with_idle_policy<R>(
client_reader: &mut CryptoReader<R>,
proto_tag: ProtoTag,
max_frame: usize,
frame_read_timeout: Duration,
buffer_pool: &Arc<BufferPool>,
forensics: &RelayForensicsState,
frame_counter: &mut u64,
stats: &Stats,
idle_policy: &RelayClientIdlePolicy,
idle_state: &mut RelayClientIdleState,
last_downstream_activity_ms: &AtomicU64,
session_started_at: Instant,
) -> Result<Option<(PooledBuffer, bool)>>
where
R: AsyncRead + Unpin + Send + 'static,
{
async fn read_exact_with_timeout<R>(
async fn read_exact_with_policy<R>(
client_reader: &mut CryptoReader<R>,
buf: &mut [u8],
frame_read_timeout: Duration,
idle_policy: &RelayClientIdlePolicy,
idle_state: &mut RelayClientIdleState,
last_downstream_activity_ms: &AtomicU64,
session_started_at: Instant,
forensics: &RelayForensicsState,
stats: &Stats,
read_label: &'static str,
) -> Result<()>
where
R: AsyncRead + Unpin + Send + 'static,
{
match timeout(frame_read_timeout, client_reader.read_exact(buf)).await {
Ok(Ok(_)) => Ok(()),
Ok(Err(e)) => Err(ProxyError::Io(e)),
Err(_) => Err(ProxyError::Io(std::io::Error::new(
std::io::ErrorKind::TimedOut,
"middle-relay client frame read timeout",
))),
/// Compute the absolute instant at which the hard idle close fires.
///
/// Base deadline is `last_client_frame_at + hard_idle`. When downstream
/// activity is more recent than the last client frame and a grace window is
/// configured, the deadline is extended to `downstream_activity + grace`,
/// whichever is later. `last_downstream_activity_ms` is milliseconds elapsed
/// since `session_started_at`, as published by the writer task.
fn hard_deadline(
    idle_policy: &RelayClientIdlePolicy,
    idle_state: &RelayClientIdleState,
    session_started_at: Instant,
    last_downstream_activity_ms: u64,
) -> Instant {
    let mut deadline = idle_state.last_client_frame_at + idle_policy.hard_idle;
    // Zero grace means downstream activity never extends the deadline.
    if idle_policy.grace_after_downstream_activity.is_zero() {
        return deadline;
    }
    let downstream_at = session_started_at + Duration::from_millis(last_downstream_activity_ms);
    if downstream_at > idle_state.last_client_frame_at {
        let grace_deadline = downstream_at + idle_policy.grace_after_downstream_activity;
        if grace_deadline > deadline {
            deadline = grace_deadline;
        }
    }
    deadline
}
let mut filled = 0usize;
while filled < buf.len() {
let timeout_window = if idle_policy.enabled {
let now = Instant::now();
let downstream_ms = last_downstream_activity_ms.load(Ordering::Relaxed);
let hard_deadline = hard_deadline(
idle_policy,
idle_state,
session_started_at,
downstream_ms,
);
if now >= hard_deadline {
clear_relay_idle_candidate(forensics.conn_id);
stats.increment_relay_idle_hard_close_total();
let client_idle_secs = now
.saturating_duration_since(idle_state.last_client_frame_at)
.as_secs();
let downstream_idle_secs = now
.saturating_duration_since(session_started_at + Duration::from_millis(downstream_ms))
.as_secs();
warn!(
trace_id = format_args!("0x{:016x}", forensics.trace_id),
conn_id = forensics.conn_id,
user = %forensics.user,
read_label,
client_idle_secs,
downstream_idle_secs,
soft_idle_secs = idle_policy.soft_idle.as_secs(),
hard_idle_secs = idle_policy.hard_idle.as_secs(),
grace_secs = idle_policy.grace_after_downstream_activity.as_secs(),
"Middle-relay hard idle close"
);
return Err(ProxyError::Io(std::io::Error::new(
std::io::ErrorKind::TimedOut,
format!(
"middle-relay hard idle timeout while reading {read_label}: client_idle_secs={client_idle_secs}, downstream_idle_secs={downstream_idle_secs}, soft_idle_secs={}, hard_idle_secs={}, grace_secs={}",
idle_policy.soft_idle.as_secs(),
idle_policy.hard_idle.as_secs(),
idle_policy.grace_after_downstream_activity.as_secs(),
),
)));
}
if !idle_state.soft_idle_marked
&& now.saturating_duration_since(idle_state.last_client_frame_at)
>= idle_policy.soft_idle
{
idle_state.soft_idle_marked = true;
if mark_relay_idle_candidate(forensics.conn_id) {
stats.increment_relay_idle_soft_mark_total();
}
info!(
trace_id = format_args!("0x{:016x}", forensics.trace_id),
conn_id = forensics.conn_id,
user = %forensics.user,
read_label,
soft_idle_secs = idle_policy.soft_idle.as_secs(),
hard_idle_secs = idle_policy.hard_idle.as_secs(),
grace_secs = idle_policy.grace_after_downstream_activity.as_secs(),
"Middle-relay soft idle mark"
);
}
let soft_deadline = idle_state.last_client_frame_at + idle_policy.soft_idle;
let next_deadline = if idle_state.soft_idle_marked {
hard_deadline
} else {
soft_deadline.min(hard_deadline)
};
let mut remaining = next_deadline.saturating_duration_since(now);
if remaining.is_zero() {
remaining = Duration::from_millis(1);
}
remaining.min(RELAY_IDLE_IO_POLL_MAX)
} else {
idle_policy.legacy_frame_read_timeout
};
let read_result = timeout(timeout_window, client_reader.read(&mut buf[filled..])).await;
match read_result {
Ok(Ok(0)) => {
return Err(ProxyError::Io(std::io::Error::from(
std::io::ErrorKind::UnexpectedEof,
)));
}
Ok(Ok(n)) => {
filled = filled.saturating_add(n);
}
Ok(Err(e)) => return Err(ProxyError::Io(e)),
Err(_) if !idle_policy.enabled => {
return Err(ProxyError::Io(std::io::Error::new(
std::io::ErrorKind::TimedOut,
format!("middle-relay client frame read timeout while reading {read_label}"),
)));
}
Err(_) => {}
}
}
Ok(())
}
loop {
let (len, quickack, raw_len_bytes) = match proto_tag {
ProtoTag::Abridged => {
let mut first = [0u8; 1];
match read_exact_with_timeout(client_reader, &mut first, frame_read_timeout).await {
match read_exact_with_policy(
client_reader,
&mut first,
idle_policy,
idle_state,
last_downstream_activity_ms,
session_started_at,
forensics,
stats,
"abridged.first_len_byte",
)
.await
{
Ok(()) => {}
Err(ProxyError::Io(e)) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
return Ok(None);
@@ -862,7 +1241,18 @@ where
let quickack = (first[0] & 0x80) != 0;
let len_words = if (first[0] & 0x7f) == 0x7f {
let mut ext = [0u8; 3];
read_exact_with_timeout(client_reader, &mut ext, frame_read_timeout).await?;
read_exact_with_policy(
client_reader,
&mut ext,
idle_policy,
idle_state,
last_downstream_activity_ms,
session_started_at,
forensics,
stats,
"abridged.extended_len",
)
.await?;
u32::from_le_bytes([ext[0], ext[1], ext[2], 0]) as usize
} else {
(first[0] & 0x7f) as usize
@@ -875,7 +1265,19 @@ where
}
ProtoTag::Intermediate | ProtoTag::Secure => {
let mut len_buf = [0u8; 4];
match read_exact_with_timeout(client_reader, &mut len_buf, frame_read_timeout).await {
match read_exact_with_policy(
client_reader,
&mut len_buf,
idle_policy,
idle_state,
last_downstream_activity_ms,
session_started_at,
forensics,
stats,
"len_prefix",
)
.await
{
Ok(()) => {}
Err(ProxyError::Io(e)) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
return Ok(None);
@@ -903,6 +1305,7 @@ where
proto = ?proto_tag,
"Frame too small — corrupt or probe"
);
stats.increment_relay_protocol_desync_close_total();
return Err(ProxyError::Proxy(format!("Frame too small: {len}")));
}
@@ -923,6 +1326,7 @@ where
Some(payload_len) => payload_len,
None => {
stats.increment_secure_padding_invalid();
stats.increment_relay_protocol_desync_close_total();
return Err(ProxyError::Proxy(format!(
"Invalid secure frame length: {len}"
)));
@@ -939,17 +1343,91 @@ where
payload.reserve(len - current_cap);
}
payload.resize(len, 0);
read_exact_with_timeout(client_reader, &mut payload[..len], frame_read_timeout).await?;
read_exact_with_policy(
client_reader,
&mut payload[..len],
idle_policy,
idle_state,
last_downstream_activity_ms,
session_started_at,
forensics,
stats,
"payload",
)
.await?;
// Secure Intermediate: strip validated trailing padding bytes.
if proto_tag == ProtoTag::Secure {
payload.truncate(secure_payload_len);
}
*frame_counter += 1;
idle_state.on_client_frame(Instant::now());
clear_relay_idle_candidate(forensics.conn_id);
return Ok(Some((payload, quickack)));
}
}
/// Test shim: drive `read_client_payload_with_idle_policy` with the idle
/// policy disabled, reproducing the legacy per-read timeout behavior.
#[cfg(test)]
async fn read_client_payload_legacy<R>(
    client_reader: &mut CryptoReader<R>,
    proto_tag: ProtoTag,
    max_frame: usize,
    frame_read_timeout: Duration,
    buffer_pool: &Arc<BufferPool>,
    forensics: &RelayForensicsState,
    frame_counter: &mut u64,
    stats: &Stats,
) -> Result<Option<(PooledBuffer, bool)>>
where
    R: AsyncRead + Unpin + Send + 'static,
{
    let started_at = Instant::now();
    let policy = RelayClientIdlePolicy::disabled(frame_read_timeout);
    let mut idle_state = RelayClientIdleState::new(started_at);
    let downstream_activity_ms = AtomicU64::new(0);
    read_client_payload_with_idle_policy(
        client_reader,
        proto_tag,
        max_frame,
        buffer_pool,
        forensics,
        frame_counter,
        stats,
        &policy,
        &mut idle_state,
        &downstream_activity_ms,
        started_at,
    )
    .await
}
/// Test-only compatibility wrapper preserving the original
/// `read_client_payload` signature; forwards unchanged to the legacy shim.
#[cfg(test)]
async fn read_client_payload<R>(
    client_reader: &mut CryptoReader<R>,
    proto_tag: ProtoTag,
    max_frame: usize,
    frame_read_timeout: Duration,
    buffer_pool: &Arc<BufferPool>,
    forensics: &RelayForensicsState,
    frame_counter: &mut u64,
    stats: &Stats,
) -> Result<Option<(PooledBuffer, bool)>>
where
    R: AsyncRead + Unpin + Send + 'static,
{
    read_client_payload_legacy(
        client_reader, proto_tag, max_frame, frame_read_timeout,
        buffer_pool, forensics, frame_counter, stats,
    )
    .await
}
enum MeWriterResponseOutcome {
Continue {
frames: usize,
@@ -1171,3 +1649,7 @@ where
#[cfg(test)]
#[path = "middle_relay_security_tests.rs"]
mod security_tests;
#[cfg(test)]
#[path = "middle_relay_idle_policy_security_tests.rs"]
mod idle_policy_security_tests;