Adaptive Buffers + Session Eviction Method

This commit is contained in:
Alexey
2026-03-18 10:49:02 +03:00
parent 5dd0c47f14
commit 3739f38440
27 changed files with 1479 additions and 50 deletions

View File

@@ -0,0 +1,383 @@
use dashmap::DashMap;
use std::cmp::max;
use std::sync::OnceLock;
use std::time::{Duration, Instant};
/// Smoothing factor for the exponential moving averages (higher = more reactive).
const EMA_ALPHA: f64 = 0.2;
/// How long a remembered per-user tier seed stays valid.
const PROFILE_TTL: Duration = Duration::from_secs(300);
/// Throughput EMA (bits/sec) above which a session becomes a promotion candidate.
const THROUGHPUT_UP_BPS: f64 = 8_000_000.0;
/// Throughput EMA (bits/sec) below which a session becomes a demotion candidate.
const THROUGHPUT_DOWN_BPS: f64 = 2_000_000.0;
/// Requested/written EMA ratio confirming the client side cannot drain fast enough.
const RATIO_CONFIRM_THRESHOLD: f64 = 1.12;
/// Consecutive ticks the throughput signal must hold before a soft promotion.
const TIER1_HOLD_TICKS: u32 = 8;
/// Consecutive ticks the backlog-ratio signal must hold before a soft promotion.
const TIER2_HOLD_TICKS: u32 = 4;
/// Consecutive quiet ticks (low throughput, no backlog, no pressure) before one
/// demotion step. At a 250 ms tick this is about two minutes — TODO confirm tick rate.
const QUIET_DEMOTE_TICKS: u32 = 480;
/// Cooldown (in ticks) after a hard promotion before another hard promotion may fire.
const HARD_COOLDOWN_TICKS: u32 = 20;
/// Consecutive-pending-write streak that triggers an immediate hard promotion.
const HARD_PENDING_THRESHOLD: u32 = 3;
/// Partial-write fraction within a single tick that triggers a hard promotion.
const HARD_PARTIAL_RATIO_THRESHOLD: f64 = 0.25;
/// Upper bound for the scaled direct-relay client→server copy buffer.
const DIRECT_C2S_CAP_BYTES: usize = 128 * 1024;
/// Upper bound for the scaled direct-relay server→client copy buffer.
const DIRECT_S2C_CAP_BYTES: usize = 512 * 1024;
/// Upper bound for the scaled middle-proxy flush batch, in frames.
const ME_FRAMES_CAP: usize = 96;
/// Upper bound for the scaled middle-proxy flush batch, in bytes.
const ME_BYTES_CAP: usize = 384 * 1024;
/// Lower bound for the adjusted middle-proxy flush delay.
const ME_DELAY_MIN_US: u64 = 150;
/// Escalation level for a relay session's buffering/batching aggressiveness.
///
/// Ordering is meaningful (`Base < Tier1 < Tier2 < Tier3`), so `max` always
/// selects the more aggressive of two tiers.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum AdaptiveTier {
    Base = 0,
    Tier1 = 1,
    Tier2 = 2,
    Tier3 = 3,
}

impl AdaptiveTier {
    /// One step up the ladder; saturates at `Tier3`.
    pub fn promote(self) -> Self {
        match self {
            Self::Base => Self::Tier1,
            Self::Tier1 => Self::Tier2,
            Self::Tier2 | Self::Tier3 => Self::Tier3,
        }
    }

    /// One step down the ladder; saturates at `Base`.
    pub fn demote(self) -> Self {
        match self {
            Self::Base | Self::Tier1 => Self::Base,
            Self::Tier2 => Self::Tier1,
            Self::Tier3 => Self::Tier2,
        }
    }

    /// Scaling fraction `(numerator, denominator)` applied to base sizes at
    /// this tier.
    fn ratio(self) -> (usize, usize) {
        match self {
            Self::Base => (1, 1),
            Self::Tier1 => (5, 4),
            Self::Tier2 => (3, 2),
            Self::Tier3 => (2, 1),
        }
    }

    /// Numeric form of the tier, for logging and metrics labels.
    pub fn as_u8(self) -> u8 {
        self as u8
    }
}
/// Why a tier change happened (surfaced in logs and metrics).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TierTransitionReason {
    /// Both the throughput and backlog-ratio signals held long enough.
    SoftConfirmed,
    /// Acute write pressure (pending/partial writes) forced an immediate step up.
    HardPressure,
    /// A sustained quiet period stepped the tier down.
    QuietDemotion,
}
/// A single tier change reported by `SessionAdaptiveController::observe`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct TierTransition {
    /// Tier in effect before the change.
    pub from: AdaptiveTier,
    /// Tier in effect after the change.
    pub to: AdaptiveTier,
    /// Signal that triggered the change.
    pub reason: TierTransitionReason,
}
/// One tick's worth of relay traffic counters fed to
/// `SessionAdaptiveController::observe`.
///
/// Byte/op fields are per-tick deltas; `s2c_consecutive_pending_writes` is a
/// point-in-time gauge of the current pending streak.
#[derive(Debug, Clone, Copy, Default)]
pub struct RelaySignalSample {
    /// Bytes relayed client→server during this tick.
    pub c2s_bytes: u64,
    /// Bytes the relay *attempted* to write server→client this tick.
    pub s2c_requested_bytes: u64,
    /// Bytes actually written server→client this tick.
    pub s2c_written_bytes: u64,
    /// Number of server→client write operations this tick.
    pub s2c_write_ops: u64,
    /// Writes that completed only partially (wrote fewer bytes than requested).
    pub s2c_partial_writes: u64,
    /// Current streak of server→client writes that returned `Pending`.
    pub s2c_consecutive_pending_writes: u32,
}
/// Per-session state machine deciding when to escalate (promote) or relax
/// (demote) the session's `AdaptiveTier` based on observed relay signals.
#[derive(Debug, Clone, Copy)]
pub struct SessionAdaptiveController {
    /// Tier currently in effect for the session.
    tier: AdaptiveTier,
    /// Highest tier reached in this session's lifetime (never decreases).
    max_tier_seen: AdaptiveTier,
    /// EMA of max(c2s, s2c-written) throughput, bits/sec.
    throughput_ema_bps: f64,
    /// EMA of server→client *requested* write volume, bits/sec.
    incoming_ema_bps: f64,
    /// EMA of server→client *completed* write volume, bits/sec.
    outgoing_ema_bps: f64,
    /// Consecutive ticks the throughput EMA has been above `THROUGHPUT_UP_BPS`.
    tier1_hold_ticks: u32,
    /// Consecutive ticks the requested/written ratio confirmed a backlog.
    tier2_hold_ticks: u32,
    /// Consecutive ticks the session has been a demotion candidate.
    quiet_ticks: u32,
    /// Remaining ticks before another hard promotion may fire.
    hard_cooldown_ticks: u32,
}
impl SessionAdaptiveController {
    /// Creates a controller starting at `initial_tier` (typically the tier
    /// seeded from the user's remembered profile) with all signal state zeroed.
    pub fn new(initial_tier: AdaptiveTier) -> Self {
        Self {
            tier: initial_tier,
            max_tier_seen: initial_tier,
            throughput_ema_bps: 0.0,
            incoming_ema_bps: 0.0,
            outgoing_ema_bps: 0.0,
            tier1_hold_ticks: 0,
            tier2_hold_ticks: 0,
            quiet_ticks: 0,
            hard_cooldown_ticks: 0,
        }
    }

    /// Highest tier this session ever reached; recorded into the user profile
    /// so future sessions can start closer to it.
    pub fn max_tier_seen(&self) -> AdaptiveTier {
        self.max_tier_seen
    }

    /// Feeds one tick's counters into the controller and returns the tier
    /// transition triggered by this tick, if any.
    ///
    /// `tick_secs` converts byte deltas to bits/sec; non-positive ticks are
    /// ignored. Checks fire in priority order:
    /// 1. hard promotion on acute write pressure (gated by a cooldown),
    /// 2. soft promotion once both EMA signals have held long enough,
    /// 3. a single demotion step after a sustained quiet streak.
    pub fn observe(&mut self, sample: RelaySignalSample, tick_secs: f64) -> Option<TierTransition> {
        if tick_secs <= f64::EPSILON {
            return None;
        }
        // Tick the hard-promotion cooldown down before evaluating signals.
        if self.hard_cooldown_ticks > 0 {
            self.hard_cooldown_ticks -= 1;
        }
        let c2s_bps = (sample.c2s_bytes as f64 * 8.0) / tick_secs;
        let incoming_bps = (sample.s2c_requested_bytes as f64 * 8.0) / tick_secs;
        let outgoing_bps = (sample.s2c_written_bytes as f64 * 8.0) / tick_secs;
        // Throughput signal follows the busier of the two directions.
        let throughput = c2s_bps.max(outgoing_bps);
        self.throughput_ema_bps = ema(self.throughput_ema_bps, throughput);
        self.incoming_ema_bps = ema(self.incoming_ema_bps, incoming_bps);
        self.outgoing_ema_bps = ema(self.outgoing_ema_bps, outgoing_bps);
        // Signal 1: sustained high throughput.
        let tier1_now = self.throughput_ema_bps >= THROUGHPUT_UP_BPS;
        if tier1_now {
            self.tier1_hold_ticks = self.tier1_hold_ticks.saturating_add(1);
        } else {
            self.tier1_hold_ticks = 0;
        }
        // Signal 2: requested bytes outpacing completed writes (client backlog).
        let ratio = if self.outgoing_ema_bps <= f64::EPSILON {
            0.0
        } else {
            self.incoming_ema_bps / self.outgoing_ema_bps
        };
        let tier2_now = ratio >= RATIO_CONFIRM_THRESHOLD;
        if tier2_now {
            self.tier2_hold_ticks = self.tier2_hold_ticks.saturating_add(1);
        } else {
            self.tier2_hold_ticks = 0;
        }
        // Hard signal: write pressure measured on this tick alone, no EMA.
        let partial_ratio = if sample.s2c_write_ops == 0 {
            0.0
        } else {
            sample.s2c_partial_writes as f64 / sample.s2c_write_ops as f64
        };
        let hard_now = sample.s2c_consecutive_pending_writes >= HARD_PENDING_THRESHOLD
            || partial_ratio >= HARD_PARTIAL_RATIO_THRESHOLD;
        if hard_now && self.hard_cooldown_ticks == 0 {
            return self.promote(TierTransitionReason::HardPressure, HARD_COOLDOWN_TICKS);
        }
        if self.tier1_hold_ticks >= TIER1_HOLD_TICKS && self.tier2_hold_ticks >= TIER2_HOLD_TICKS {
            // NOTE(review): passing 0 here also clears any cooldown still
            // running from an earlier hard promotion — confirm that a soft
            // promotion is meant to re-arm the hard path immediately.
            return self.promote(TierTransitionReason::SoftConfirmed, 0);
        }
        // Demotion candidate: low throughput with no backlog and no pressure.
        let demote_candidate = self.throughput_ema_bps < THROUGHPUT_DOWN_BPS && !tier2_now && !hard_now;
        if demote_candidate {
            self.quiet_ticks = self.quiet_ticks.saturating_add(1);
            if self.quiet_ticks >= QUIET_DEMOTE_TICKS {
                self.quiet_ticks = 0;
                return self.demote(TierTransitionReason::QuietDemotion);
            }
        } else {
            // Any activity resets the quiet streak: demotion must be sustained.
            self.quiet_ticks = 0;
        }
        None
    }

    /// Steps one tier up. Returns `None` when already at the top (no state is
    /// touched in that case, including the cooldown). On success, resets all
    /// hold/quiet counters, records `max_tier_seen`, and arms the supplied
    /// hard-promotion cooldown.
    fn promote(
        &mut self,
        reason: TierTransitionReason,
        hard_cooldown_ticks: u32,
    ) -> Option<TierTransition> {
        let from = self.tier;
        let to = from.promote();
        if from == to {
            return None;
        }
        self.tier = to;
        self.max_tier_seen = max(self.max_tier_seen, to);
        self.hard_cooldown_ticks = hard_cooldown_ticks;
        self.tier1_hold_ticks = 0;
        self.tier2_hold_ticks = 0;
        self.quiet_ticks = 0;
        Some(TierTransition { from, to, reason })
    }

    /// Steps one tier down. Returns `None` when already at `Base`. Hold
    /// counters are reset here; the caller clears `quiet_ticks` itself before
    /// invoking this.
    fn demote(&mut self, reason: TierTransitionReason) -> Option<TierTransition> {
        let from = self.tier;
        let to = from.demote();
        if from == to {
            return None;
        }
        self.tier = to;
        self.tier1_hold_ticks = 0;
        self.tier2_hold_ticks = 0;
        Some(TierTransition { from, to, reason })
    }
}
/// Cached per-user tier seed with the time it was last refreshed.
#[derive(Debug, Clone, Copy)]
struct UserAdaptiveProfile {
    /// Highest tier recorded for the user within the TTL window.
    tier: AdaptiveTier,
    /// When this profile was last written.
    seen_at: Instant,
}
/// Lazily-initialized, process-wide registry of per-user adaptive profiles.
///
/// NOTE(review): entries are inserted and overwritten but never removed, so
/// the map grows with the number of distinct users seen — confirm this is an
/// acceptable memory bound for the deployment.
fn profiles() -> &'static DashMap<String, UserAdaptiveProfile> {
    static USER_PROFILES: OnceLock<DashMap<String, UserAdaptiveProfile>> = OnceLock::new();
    USER_PROFILES.get_or_init(DashMap::new)
}
/// Returns the tier a new session for `user` should start at: the remembered
/// profile tier when it is still fresh (within `PROFILE_TTL`), otherwise
/// `Base`. Expired entries are left in place; `record_user_tier` refreshes
/// them on the next write.
pub fn seed_tier_for_user(user: &str) -> AdaptiveTier {
    let now = Instant::now();
    profiles()
        .get(user)
        .filter(|profile| now.duration_since(profile.seen_at) <= PROFILE_TTL)
        .map(|profile| profile.tier)
        .unwrap_or(AdaptiveTier::Base)
}
/// Records that `user` reached `tier`, refreshing the profile timestamp.
///
/// Within the TTL window the stored tier only ratchets upward
/// (`max(existing, tier)`); once the previous entry has expired, the new tier
/// replaces it outright.
///
/// Uses the `DashMap` entry API so the read-modify-write is one atomic map
/// operation: the previous `get_mut`-miss-then-`insert` sequence could lose a
/// concurrent writer's max-merge between the miss and the insert, and paid
/// for two key lookups. The owned-key allocation `entry` requires is fine
/// here — this is called at most a few times per session.
pub fn record_user_tier(user: &str, tier: AdaptiveTier) {
    let now = Instant::now();
    profiles()
        .entry(user.to_string())
        .and_modify(|profile| {
            profile.tier = if now.duration_since(profile.seen_at) > PROFILE_TTL {
                // Stale profile: the old tier no longer reflects the user.
                tier
            } else {
                max(profile.tier, tier)
            };
            profile.seen_at = now;
        })
        .or_insert(UserAdaptiveProfile { tier, seen_at: now });
}
/// Scales the configured direct-relay copy-buffer sizes for `tier`.
///
/// Returns `(c2s_bytes, s2c_bytes)`, each capped at its direction-specific
/// ceiling and floored at 1 byte.
pub fn direct_copy_buffers_for_tier(
    tier: AdaptiveTier,
    base_c2s: usize,
    base_s2c: usize,
) -> (usize, usize) {
    let (numerator, denominator) = tier.ratio();
    let c2s = scale(base_c2s, numerator, denominator, DIRECT_C2S_CAP_BYTES);
    let s2c = scale(base_s2c, numerator, denominator, DIRECT_S2C_CAP_BYTES);
    (c2s, s2c)
}
/// Adjusts the middle-proxy flush policy for `tier`, returning
/// `(max_frames, max_bytes, max_delay)`.
///
/// Frame and byte limits are scaled *up* by the tier ratio (floored at 1
/// frame / 4096 bytes, ceilinged at the `ME_*_CAP` constants); the flush
/// delay is scaled *down* for higher tiers and clamped between
/// `ME_DELAY_MIN_US` and the configured base delay.
pub fn me_flush_policy_for_tier(
    tier: AdaptiveTier,
    base_frames: usize,
    base_bytes: usize,
    base_delay: Duration,
) -> (usize, usize, Duration) {
    let (numerator, denominator) = tier.ratio();
    let frames = scale(base_frames, numerator, denominator, ME_FRAMES_CAP).max(1);
    let bytes = scale(base_bytes, numerator, denominator, ME_BYTES_CAP).max(4096);
    let base_us = base_delay.as_micros() as u64;
    // Per-tier delay fraction: higher tiers flush sooner.
    let (mul, div): (u64, u64) = match tier {
        AdaptiveTier::Base => (1, 1),
        AdaptiveTier::Tier1 => (7, 10),
        AdaptiveTier::Tier2 => (1, 2),
        AdaptiveTier::Tier3 => (3, 10),
    };
    let scaled_us = base_us.saturating_mul(mul).saturating_div(div);
    // Clamp into [ME_DELAY_MIN_US, base delay]; the ceiling is lifted to the
    // floor when the configured base delay is already below the minimum.
    let floor_us = ME_DELAY_MIN_US;
    let ceil_us = base_us.max(floor_us);
    let adjusted_us = scaled_us.clamp(floor_us, ceil_us);
    (frames, bytes, Duration::from_micros(adjusted_us))
}
/// One exponential-moving-average step.
///
/// A `prev` at or below machine epsilon is treated as "no history yet", so
/// the EMA seeds directly with `value`; otherwise the new sample is blended
/// in with weight `EMA_ALPHA`.
fn ema(prev: f64, value: f64) -> f64 {
    if prev <= f64::EPSILON {
        return value;
    }
    value * EMA_ALPHA + prev * (1.0 - EMA_ALPHA)
}
/// Scales `base` by `numerator / denominator` using saturating arithmetic,
/// guarding against a zero denominator, then bounds the result to `[1, cap]`.
fn scale(base: usize, numerator: usize, denominator: usize, cap: usize) -> usize {
    let divisor = denominator.max(1);
    let grown = base.saturating_mul(numerator) / divisor;
    grown.min(cap).max(1)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Convenience constructor for a one-tick signal sample.
    fn sample(
        c2s_bytes: u64,
        s2c_requested_bytes: u64,
        s2c_written_bytes: u64,
        s2c_write_ops: u64,
        s2c_partial_writes: u64,
        s2c_consecutive_pending_writes: u32,
    ) -> RelaySignalSample {
        RelaySignalSample {
            c2s_bytes,
            s2c_requested_bytes,
            s2c_written_bytes,
            s2c_write_ops,
            s2c_partial_writes,
            s2c_consecutive_pending_writes,
        }
    }

    #[test]
    fn test_soft_promotion_requires_tier1_and_tier2() {
        let mut controller = SessionAdaptiveController::new(AdaptiveTier::Base);
        let tick_secs = 0.25;
        // 300_000 B per 250 ms tick ≈ 9.6 Mbps (above the up-threshold) while
        // requested > written keeps the backlog ratio above its threshold.
        let busy = sample(300_000, 320_000, 250_000, 10, 0, 0);
        let mut outcome = None;
        for _ in 0..TIER1_HOLD_TICKS {
            outcome = controller.observe(busy, tick_secs);
        }
        let transition = outcome.expect("expected soft promotion");
        assert_eq!(transition.from, AdaptiveTier::Base);
        assert_eq!(transition.to, AdaptiveTier::Tier1);
        assert_eq!(transition.reason, TierTransitionReason::SoftConfirmed);
    }

    #[test]
    fn test_hard_promotion_on_pending_pressure() {
        let mut controller = SessionAdaptiveController::new(AdaptiveTier::Base);
        // A pending-write streak at the threshold must promote immediately,
        // without any EMA warm-up.
        let pressured = sample(10_000, 20_000, 10_000, 4, 1, HARD_PENDING_THRESHOLD);
        let transition = controller
            .observe(pressured, 0.25)
            .expect("expected hard promotion");
        assert_eq!(transition.reason, TierTransitionReason::HardPressure);
        assert_eq!(transition.to, AdaptiveTier::Tier1);
    }

    #[test]
    fn test_quiet_demotion_is_slow_and_stepwise() {
        let mut controller = SessionAdaptiveController::new(AdaptiveTier::Tier2);
        // Near-zero traffic: only after the full quiet streak does a single
        // one-step demotion fire.
        let idle = sample(1, 1, 1, 1, 0, 0);
        let mut outcome = None;
        for _ in 0..QUIET_DEMOTE_TICKS {
            outcome = controller.observe(idle, 0.25);
        }
        let transition = outcome.expect("expected quiet demotion");
        assert_eq!(transition.from, AdaptiveTier::Tier2);
        assert_eq!(transition.to, AdaptiveTier::Tier1);
        assert_eq!(transition.reason, TierTransitionReason::QuietDemotion);
    }
}

View File

@@ -40,6 +40,7 @@ use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle
use crate::proxy::masking::handle_bad_client;
use crate::proxy::middle_relay::handle_via_middle_proxy;
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
use crate::proxy::session_eviction::register_session;
fn beobachten_ttl(config: &ProxyConfig) -> Duration {
Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60))
@@ -731,6 +732,17 @@ impl RunningClientHandler {
return Err(e);
}
let registration = register_session(&user, success.dc_idx);
if registration.replaced_existing {
stats.increment_reconnect_evict_total();
warn!(
user = %user,
dc = success.dc_idx,
"Reconnect detected: replacing active session for user+dc"
);
}
let session_lease = registration.lease;
let route_snapshot = route_runtime.snapshot();
let session_id = rng.u64();
let relay_result = if config.general.use_middle_proxy
@@ -750,6 +762,7 @@ impl RunningClientHandler {
route_runtime.subscribe(),
route_snapshot,
session_id,
session_lease.clone(),
)
.await
} else {
@@ -766,6 +779,7 @@ impl RunningClientHandler {
route_runtime.subscribe(),
route_snapshot,
session_id,
session_lease.clone(),
)
.await
}
@@ -783,6 +797,7 @@ impl RunningClientHandler {
route_runtime.subscribe(),
route_snapshot,
session_id,
session_lease.clone(),
)
.await
};

View File

@@ -18,6 +18,8 @@ use crate::proxy::route_mode::{
RelayRouteMode, RouteCutoverState, ROUTE_SWITCH_ERROR_MSG, affected_cutover_state,
cutover_stagger_delay,
};
use crate::proxy::adaptive_buffers;
use crate::proxy::session_eviction::SessionLease;
use crate::stats::Stats;
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
use crate::transport::UpstreamManager;
@@ -34,6 +36,7 @@ pub(crate) async fn handle_via_direct<R, W>(
mut route_rx: watch::Receiver<RouteCutoverState>,
route_snapshot: RouteCutoverState,
session_id: u64,
session_lease: SessionLease,
) -> Result<()>
where
R: AsyncRead + Unpin + Send + 'static,
@@ -67,16 +70,26 @@ where
stats.increment_user_curr_connects(user);
stats.increment_current_connections_direct();
let seed_tier = adaptive_buffers::seed_tier_for_user(user);
let (c2s_copy_buf, s2c_copy_buf) = adaptive_buffers::direct_copy_buffers_for_tier(
seed_tier,
config.general.direct_relay_copy_buf_c2s_bytes,
config.general.direct_relay_copy_buf_s2c_bytes,
);
let relay_result = relay_bidirectional(
client_reader,
client_writer,
tg_reader,
tg_writer,
config.general.direct_relay_copy_buf_c2s_bytes,
config.general.direct_relay_copy_buf_s2c_bytes,
c2s_copy_buf,
s2c_copy_buf,
user,
success.dc_idx,
Arc::clone(&stats),
buffer_pool,
session_lease,
seed_tier,
);
tokio::pin!(relay_result);
let relay_result = loop {

View File

@@ -20,6 +20,8 @@ use crate::proxy::route_mode::{
RelayRouteMode, RouteCutoverState, ROUTE_SWITCH_ERROR_MSG, affected_cutover_state,
cutover_stagger_delay,
};
use crate::proxy::adaptive_buffers::{self, AdaptiveTier};
use crate::proxy::session_eviction::SessionLease;
use crate::stats::Stats;
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
use crate::transport::middle_proxy::{MePool, MeResponse, proto_flags_for_tag};
@@ -59,8 +61,8 @@ struct MeD2cFlushPolicy {
}
impl MeD2cFlushPolicy {
fn from_config(config: &ProxyConfig) -> Self {
Self {
fn from_config(config: &ProxyConfig, tier: AdaptiveTier) -> Self {
let base = Self {
max_frames: config
.general
.me_d2c_flush_batch_max_frames
@@ -71,6 +73,18 @@ impl MeD2cFlushPolicy {
.max(ME_D2C_FLUSH_BATCH_MAX_BYTES_MIN),
max_delay: Duration::from_micros(config.general.me_d2c_flush_batch_max_delay_us),
ack_flush_immediate: config.general.me_d2c_ack_flush_immediate,
};
let (max_frames, max_bytes, max_delay) = adaptive_buffers::me_flush_policy_for_tier(
tier,
base.max_frames,
base.max_bytes,
base.max_delay,
);
Self {
max_frames,
max_bytes,
max_delay,
ack_flush_immediate: base.ack_flush_immediate,
}
}
}
@@ -235,6 +249,7 @@ pub(crate) async fn handle_via_middle_proxy<R, W>(
mut route_rx: watch::Receiver<RouteCutoverState>,
route_snapshot: RouteCutoverState,
session_id: u64,
session_lease: SessionLease,
) -> Result<()>
where
R: AsyncRead + Unpin + Send + 'static,
@@ -244,6 +259,7 @@ where
let peer = success.peer;
let proto_tag = success.proto_tag;
let pool_generation = me_pool.current_generation();
let seed_tier = adaptive_buffers::seed_tier_for_user(&user);
debug!(
user = %user,
@@ -295,6 +311,15 @@ where
return Err(ProxyError::Proxy(ROUTE_SWITCH_ERROR_MSG.to_string()));
}
if session_lease.is_stale() {
stats.increment_reconnect_stale_close_total();
let _ = me_pool.send_close(conn_id).await;
me_pool.registry().unregister(conn_id).await;
stats.decrement_current_connections_me();
stats.decrement_user_curr_connects(&user);
return Err(ProxyError::Proxy("Session evicted by reconnect".to_string()));
}
// Per-user ad_tag from access.user_ad_tags; fallback to general.ad_tag (hot-reloadable)
let user_tag: Option<Vec<u8>> = config
.access
@@ -368,7 +393,7 @@ where
let rng_clone = rng.clone();
let user_clone = user.clone();
let bytes_me2c_clone = bytes_me2c.clone();
let d2c_flush_policy = MeD2cFlushPolicy::from_config(&config);
let d2c_flush_policy = MeD2cFlushPolicy::from_config(&config, seed_tier);
let me_writer = tokio::spawn(async move {
let mut writer = crypto_writer;
let mut frame_buf = Vec::with_capacity(16 * 1024);
@@ -528,6 +553,12 @@ where
let mut frame_counter: u64 = 0;
let mut route_watch_open = true;
loop {
if session_lease.is_stale() {
stats.increment_reconnect_stale_close_total();
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
main_result = Err(ProxyError::Proxy("Session evicted by reconnect".to_string()));
break;
}
if let Some(cutover) = affected_cutover_state(
&route_rx,
RelayRouteMode::Middle,
@@ -636,6 +667,7 @@ where
frames_ok = frame_counter,
"ME relay cleanup"
);
adaptive_buffers::record_user_tier(&user, seed_tier);
me_pool.registry().unregister(conn_id).await;
stats.decrement_current_connections_me();
stats.decrement_user_curr_connects(&user);

View File

@@ -1,5 +1,6 @@
//! Proxy Defs
pub mod adaptive_buffers;
pub mod client;
pub mod direct_relay;
pub mod handshake;
@@ -7,6 +8,7 @@ pub mod masking;
pub mod middle_relay;
pub mod route_mode;
pub mod relay;
pub mod session_eviction;
pub use client::ClientHandler;
#[allow(unused_imports)]

View File

@@ -63,6 +63,10 @@ use tokio::io::{
use tokio::time::Instant;
use tracing::{debug, trace, warn};
use crate::error::Result;
use crate::proxy::adaptive_buffers::{
self, AdaptiveTier, RelaySignalSample, SessionAdaptiveController, TierTransitionReason,
};
use crate::proxy::session_eviction::SessionLease;
use crate::stats::Stats;
use crate::stream::BufferPool;
@@ -79,6 +83,7 @@ const ACTIVITY_TIMEOUT: Duration = Duration::from_secs(1800);
/// 10 seconds gives responsive timeout detection (±10s accuracy)
/// without measurable overhead from atomic reads.
const WATCHDOG_INTERVAL: Duration = Duration::from_secs(10);
const ADAPTIVE_TICK: Duration = Duration::from_millis(250);
// ============= CombinedStream =============
@@ -155,6 +160,16 @@ struct SharedCounters {
s2c_ops: AtomicU64,
/// Milliseconds since relay epoch of last I/O activity
last_activity_ms: AtomicU64,
/// Bytes requested to write to client (S→C direction).
s2c_requested_bytes: AtomicU64,
/// Total write operations for S→C direction.
s2c_write_ops: AtomicU64,
/// Number of partial writes to client.
s2c_partial_writes: AtomicU64,
/// Number of times S→C poll_write returned Pending.
s2c_pending_writes: AtomicU64,
/// Consecutive pending writes in S→C direction.
s2c_consecutive_pending_writes: AtomicU64,
}
impl SharedCounters {
@@ -165,6 +180,11 @@ impl SharedCounters {
c2s_ops: AtomicU64::new(0),
s2c_ops: AtomicU64::new(0),
last_activity_ms: AtomicU64::new(0),
s2c_requested_bytes: AtomicU64::new(0),
s2c_write_ops: AtomicU64::new(0),
s2c_partial_writes: AtomicU64::new(0),
s2c_pending_writes: AtomicU64::new(0),
s2c_consecutive_pending_writes: AtomicU64::new(0),
}
}
@@ -259,9 +279,21 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
buf: &[u8],
) -> Poll<io::Result<usize>> {
let this = self.get_mut();
this.counters
.s2c_requested_bytes
.fetch_add(buf.len() as u64, Ordering::Relaxed);
match Pin::new(&mut this.inner).poll_write(cx, buf) {
Poll::Ready(Ok(n)) => {
this.counters.s2c_write_ops.fetch_add(1, Ordering::Relaxed);
this.counters
.s2c_consecutive_pending_writes
.store(0, Ordering::Relaxed);
if n < buf.len() {
this.counters
.s2c_partial_writes
.fetch_add(1, Ordering::Relaxed);
}
if n > 0 {
// S→C: data written to client
this.counters.s2c_bytes.fetch_add(n as u64, Ordering::Relaxed);
@@ -275,6 +307,15 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
}
Poll::Ready(Ok(n))
}
Poll::Pending => {
this.counters
.s2c_pending_writes
.fetch_add(1, Ordering::Relaxed);
this.counters
.s2c_consecutive_pending_writes
.fetch_add(1, Ordering::Relaxed);
Poll::Pending
}
other => other,
}
}
@@ -316,8 +357,11 @@ pub async fn relay_bidirectional<CR, CW, SR, SW>(
c2s_buf_size: usize,
s2c_buf_size: usize,
user: &str,
dc_idx: i16,
stats: Arc<Stats>,
_buffer_pool: Arc<BufferPool>,
session_lease: SessionLease,
seed_tier: AdaptiveTier,
) -> Result<()>
where
CR: AsyncRead + Unpin + Send + 'static,
@@ -345,13 +389,33 @@ where
// ── Watchdog: activity timeout + periodic rate logging ──────────
let wd_counters = Arc::clone(&counters);
let wd_user = user_owned.clone();
let wd_dc = dc_idx;
let wd_stats = Arc::clone(&stats);
let wd_session = session_lease.clone();
let watchdog = async {
let mut prev_c2s: u64 = 0;
let mut prev_s2c: u64 = 0;
let mut prev_c2s_log: u64 = 0;
let mut prev_s2c_log: u64 = 0;
let mut prev_c2s_sample: u64 = 0;
let mut prev_s2c_requested_sample: u64 = 0;
let mut prev_s2c_written_sample: u64 = 0;
let mut prev_s2c_write_ops_sample: u64 = 0;
let mut prev_s2c_partial_sample: u64 = 0;
let mut accumulated_log = Duration::ZERO;
let mut adaptive = SessionAdaptiveController::new(seed_tier);
loop {
tokio::time::sleep(WATCHDOG_INTERVAL).await;
tokio::time::sleep(ADAPTIVE_TICK).await;
if wd_session.is_stale() {
wd_stats.increment_reconnect_stale_close_total();
warn!(
user = %wd_user,
dc = wd_dc,
"Session evicted by reconnect"
);
return;
}
let now = Instant::now();
let idle = wd_counters.idle_duration(now, epoch);
@@ -370,11 +434,80 @@ where
return; // Causes select! to cancel copy_bidirectional
}
let c2s_total = wd_counters.c2s_bytes.load(Ordering::Relaxed);
let s2c_requested_total = wd_counters
.s2c_requested_bytes
.load(Ordering::Relaxed);
let s2c_written_total = wd_counters.s2c_bytes.load(Ordering::Relaxed);
let s2c_write_ops_total = wd_counters
.s2c_write_ops
.load(Ordering::Relaxed);
let s2c_partial_total = wd_counters
.s2c_partial_writes
.load(Ordering::Relaxed);
let consecutive_pending = wd_counters
.s2c_consecutive_pending_writes
.load(Ordering::Relaxed) as u32;
let sample = RelaySignalSample {
c2s_bytes: c2s_total.saturating_sub(prev_c2s_sample),
s2c_requested_bytes: s2c_requested_total
.saturating_sub(prev_s2c_requested_sample),
s2c_written_bytes: s2c_written_total
.saturating_sub(prev_s2c_written_sample),
s2c_write_ops: s2c_write_ops_total
.saturating_sub(prev_s2c_write_ops_sample),
s2c_partial_writes: s2c_partial_total
.saturating_sub(prev_s2c_partial_sample),
s2c_consecutive_pending_writes: consecutive_pending,
};
if let Some(transition) = adaptive.observe(sample, ADAPTIVE_TICK.as_secs_f64()) {
match transition.reason {
TierTransitionReason::SoftConfirmed => {
wd_stats.increment_relay_adaptive_promotions_total();
}
TierTransitionReason::HardPressure => {
wd_stats.increment_relay_adaptive_promotions_total();
wd_stats.increment_relay_adaptive_hard_promotions_total();
}
TierTransitionReason::QuietDemotion => {
wd_stats.increment_relay_adaptive_demotions_total();
}
}
adaptive_buffers::record_user_tier(&wd_user, adaptive.max_tier_seen());
debug!(
user = %wd_user,
dc = wd_dc,
from_tier = transition.from.as_u8(),
to_tier = transition.to.as_u8(),
reason = ?transition.reason,
throughput_ema_bps = sample
.c2s_bytes
.max(sample.s2c_written_bytes)
.saturating_mul(8)
.saturating_mul(4),
"Adaptive relay tier transition"
);
}
prev_c2s_sample = c2s_total;
prev_s2c_requested_sample = s2c_requested_total;
prev_s2c_written_sample = s2c_written_total;
prev_s2c_write_ops_sample = s2c_write_ops_total;
prev_s2c_partial_sample = s2c_partial_total;
accumulated_log = accumulated_log.saturating_add(ADAPTIVE_TICK);
if accumulated_log < WATCHDOG_INTERVAL {
continue;
}
accumulated_log = Duration::ZERO;
// ── Periodic rate logging ───────────────────────────────
let c2s = wd_counters.c2s_bytes.load(Ordering::Relaxed);
let s2c = wd_counters.s2c_bytes.load(Ordering::Relaxed);
let c2s_delta = c2s - prev_c2s;
let s2c_delta = s2c - prev_s2c;
let c2s_delta = c2s.saturating_sub(prev_c2s_log);
let s2c_delta = s2c.saturating_sub(prev_s2c_log);
if c2s_delta > 0 || s2c_delta > 0 {
let secs = WATCHDOG_INTERVAL.as_secs_f64();
@@ -388,8 +521,8 @@ where
);
}
prev_c2s = c2s;
prev_s2c = s2c;
prev_c2s_log = c2s;
prev_s2c_log = s2c;
}
};
@@ -424,6 +557,7 @@ where
let c2s_ops = counters.c2s_ops.load(Ordering::Relaxed);
let s2c_ops = counters.s2c_ops.load(Ordering::Relaxed);
let duration = epoch.elapsed();
adaptive_buffers::record_user_tier(&user_owned, seed_tier);
match copy_result {
Some(Ok((c2s, s2c))) => {

View File

@@ -0,0 +1,46 @@
/// Session eviction is intentionally disabled at runtime.
///
/// The initial `user+dc` single-lease model caused valid parallel client
/// connections to evict each other. Keep the API shape for compatibility,
/// but make it a no-op until a safer policy is introduced.
#[derive(Debug, Clone, Default)]
pub struct SessionLease;

impl SessionLease {
    /// Always `false` while eviction is disabled: no lease ever goes stale,
    /// so relay loops polling this never self-terminate because of it.
    pub fn is_stale(&self) -> bool {
        false
    }

    /// No-op placeholder kept so call sites survive a future re-enable.
    #[allow(dead_code)]
    pub fn release(&self) {}
}
/// Outcome of `register_session`.
pub struct RegistrationResult {
    /// Lease handle the relay holds for the session's lifetime.
    pub lease: SessionLease,
    /// Whether an existing session for the same `user+dc` was displaced.
    /// Always `false` while eviction is disabled.
    pub replaced_existing: bool,
}
pub fn register_session(_user: &str, _dc_idx: i16) -> RegistrationResult {
RegistrationResult {
lease: SessionLease,
replaced_existing: false,
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_session_eviction_disabled_behavior() {
        // Two registrations for the same user+dc must coexist peacefully
        // while eviction is a no-op.
        let first = register_session("alice", 2);
        let second = register_session("alice", 2);
        assert!(!first.replaced_existing);
        assert!(!second.replaced_existing);
        assert!(!first.lease.is_stale());
        assert!(!second.lease.is_stale());
        // release() is a no-op but must stay callable for API compatibility.
        first.lease.release();
        second.lease.release();
    }
}