Compare commits

...

24 Commits

Author SHA1 Message Date
Roman Martynov 444c2257f7
Merge 4d83d02a8f into a383efcb21 2026-03-26 13:54:02 +09:00
Alexey a383efcb21
Bounded Hybrid Loop + Watch + Family ArcSwap Snapshots + Health in Parallel + ArcSwap Writers + Registry Split + Endpoint on ArcSwap + New Backpressure Model + ME Decomposition: merge pull request #586 from telemt/flow
Bounded Hybrid Loop + Watch + Family ArcSwap Snapshots + Health in Parallel + ArcSwap Writers + Registry Split + Endpoint on ArcSwap + New Backpressure Model + ME Decomposition
2026-03-26 02:31:18 +03:00
Alexey cb5753f77c
Update admission.rs
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-26 02:19:35 +03:00
Alexey 7a075b2ffe
Middle Relay fixes
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-26 02:18:39 +03:00
Alexey 7de822dd15
RPC Proxy-req fixes
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 22:51:00 +03:00
Alexey 1bbf4584a6
Merge branch 'main' into flow 2026-03-25 22:25:58 +03:00
Alexey 70479c4094
Unexpected-only Quarantine
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 22:25:39 +03:00
Alexey b94746a6e0
Dashmap-driven Routing + Health Parallel + Family Runtime State
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 21:26:20 +03:00
Alexey ceae1564af
Floor Runtime + Writer Selection Policy + Reconnect/Warmup + TransportPolicy + NAT Runtime Cores
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 20:55:20 +03:00
Alexey 7ce5fc66db
ME Reinit Core advancing + Binding Policy Core
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 20:35:57 +03:00
Alexey 41493462a1
Drain + Single-Endpoint Runtime Cores
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 20:29:22 +03:00
Alexey 6ee4d4648c
ME Health Core
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 20:01:44 +03:00
Alexey 97f6649584
ME Route Runtime Core
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 19:56:25 +03:00
Alexey dc6b6d3f9d
ME Writer Lifecycle Core
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 19:47:41 +03:00
Alexey 1c3e0d4e46
ME Reinit Core
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 19:43:02 +03:00
Alexey 0b78583cf5
ME Routing Core
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 18:18:06 +03:00
Alexey 28d318d724
ME Writer Task Consolidation
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 17:59:54 +03:00
Alexey 70c2f0f045
RoutingTable + BindingState
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 17:50:44 +03:00
Alexey 8bab3f70e1
WritersState on ArcSwap + Preferred Endpoint on ArcSwap + Two-map Rotation for Desync Dedup
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 17:25:35 +03:00
Alexey 41d786cc11
Safety Gates Invariants + HybridAsyncPersistent + Watch + Runtime Snapshots + ME Writer Ping Tracker + Parallel Recovery + Backpressure Guardrails
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-25 16:29:35 +03:00
sintanial 4d83d02a8f
Apply [timeouts] tg_connect to upstream DC TCP connect attempts
Wire config.timeouts.tg_connect into UpstreamManager; per-attempt timeout uses
the same .max(1) pattern as connect_budget_ms.

Reject timeouts.tg_connect = 0 at config load (consistent with
general.upstream_connect_budget_ms and related checks). Default when the key
is omitted remains default_connect_timeout() via serde.

Fixes telemt/telemt#439
2026-03-21 16:26:51 +03:00
sintanial fea8bc63fd
Merge branch 'main' of https://github.com/telemt/telemt 2026-03-20 23:27:02 +03:00
sintanial d8f7173f15 Merge branch 'main' of https://github.com/telemt/telemt 2026-03-01 15:18:47 +03:00
sintanial b23d433e19 Merge branch 'main' of https://github.com/telemt/telemt 2026-03-01 13:48:59 +03:00
37 changed files with 2345 additions and 1199 deletions
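
A minimal sketch of the per-attempt timeout wiring described in commit 4d83d02a8f, assuming tg_connect is in seconds; connect_attempt is an illustrative name, not the actual UpstreamManager API:

use std::time::Duration;
use tokio::net::TcpStream;
use tokio::time::timeout;

// Bound a single upstream DC connect attempt by timeouts.tg_connect,
// clamped with .max(1) the same way connect_budget_ms avoids a
// zero-duration timeout.
async fn connect_attempt(addr: &str, tg_connect_secs: u64) -> std::io::Result<TcpStream> {
    let per_attempt = Duration::from_secs(tg_connect_secs.max(1));
    match timeout(per_attempt, TcpStream::connect(addr)).await {
        Ok(res) => res,
        Err(_elapsed) => Err(std::io::Error::new(
            std::io::ErrorKind::TimedOut,
            "upstream DC connect attempt timed out",
        )),
    }
}

The config-side guard (rejecting tg_connect = 0 at load) still matters: the .max(1) clamp is defense in depth, not the primary validation.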

Cargo.lock generated
View File

@ -2793,7 +2793,7 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
[[package]]
name = "telemt"
version = "3.3.31"
version = "3.3.32"
dependencies = [
"aes",
"anyhow",

View File

@ -1,6 +1,6 @@
[package]
name = "telemt"
version = "3.3.31"
version = "3.3.32"
edition = "2024"
[features]

View File

@ -35,11 +35,14 @@ pub(super) struct RuntimeGatesData {
pub(super) conditional_cast_enabled: bool,
pub(super) me_runtime_ready: bool,
pub(super) me2dc_fallback_enabled: bool,
pub(super) me2dc_fast_enabled: bool,
pub(super) use_middle_proxy: bool,
pub(super) route_mode: &'static str,
pub(super) reroute_active: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub(super) reroute_to_direct_at_epoch_secs: Option<u64>,
#[serde(skip_serializing_if = "Option::is_none")]
pub(super) reroute_reason: Option<&'static str>,
pub(super) startup_status: &'static str,
pub(super) startup_stage: String,
pub(super) startup_progress_pct: f64,
@ -86,6 +89,7 @@ pub(super) struct EffectiveMiddleProxyLimits {
pub(super) writer_pick_mode: &'static str,
pub(super) writer_pick_sample_size: u8,
pub(super) me2dc_fallback: bool,
pub(super) me2dc_fast: bool,
}
#[derive(Serialize)]
@ -169,6 +173,8 @@ pub(super) async fn build_runtime_gates_data(
let startup_summary = build_runtime_startup_summary(shared).await;
let route_state = shared.route_runtime.snapshot();
let route_mode = route_state.mode.as_str();
let fast_fallback_enabled =
cfg.general.use_middle_proxy && cfg.general.me2dc_fallback && cfg.general.me2dc_fast;
let reroute_active = cfg.general.use_middle_proxy
&& cfg.general.me2dc_fallback
&& matches!(route_state.mode, RelayRouteMode::Direct);
@ -177,6 +183,15 @@ pub(super) async fn build_runtime_gates_data(
} else {
None
};
let reroute_reason = if reroute_active {
if fast_fallback_enabled {
Some("fast_not_ready_fallback")
} else {
Some("strict_grace_fallback")
}
} else {
None
};
let me_runtime_ready = if !cfg.general.use_middle_proxy {
true
} else {
@ -194,10 +209,12 @@ pub(super) async fn build_runtime_gates_data(
conditional_cast_enabled: cfg.general.use_middle_proxy,
me_runtime_ready,
me2dc_fallback_enabled: cfg.general.me2dc_fallback,
me2dc_fast_enabled: fast_fallback_enabled,
use_middle_proxy: cfg.general.use_middle_proxy,
route_mode,
reroute_active,
reroute_to_direct_at_epoch_secs,
reroute_reason,
startup_status: startup_summary.status,
startup_stage: startup_summary.stage,
startup_progress_pct: startup_summary.progress_pct,
@ -263,6 +280,7 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
writer_pick_mode: me_writer_pick_mode_label(cfg.general.me_writer_pick_mode),
writer_pick_sample_size: cfg.general.me_writer_pick_sample_size,
me2dc_fallback: cfg.general.me2dc_fallback,
me2dc_fast: cfg.general.me2dc_fast,
},
user_ip_policy: EffectiveUserIpPolicyLimits {
global_each: cfg.access.user_max_unique_ips_global_each,

View File

@ -273,6 +273,10 @@ pub(crate) fn default_me2dc_fallback() -> bool {
true
}
pub(crate) fn default_me2dc_fast() -> bool {
false
}
pub(crate) fn default_keepalive_interval() -> u64 {
8
}

View File

@ -651,6 +651,9 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
}
if old.general.me_route_no_writer_mode != new.general.me_route_no_writer_mode
|| old.general.me_route_no_writer_wait_ms != new.general.me_route_no_writer_wait_ms
|| old.general.me_route_hybrid_max_wait_ms != new.general.me_route_hybrid_max_wait_ms
|| old.general.me_route_blocking_send_timeout_ms
!= new.general.me_route_blocking_send_timeout_ms
|| old.general.me_route_inline_recovery_attempts
!= new.general.me_route_inline_recovery_attempts
|| old.general.me_route_inline_recovery_wait_ms
@ -669,9 +672,11 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
warned = true;
warn!("config reload: general.me_init_retry_attempts changed; restart required");
}
if old.general.me2dc_fallback != new.general.me2dc_fallback {
if old.general.me2dc_fallback != new.general.me2dc_fallback
|| old.general.me2dc_fast != new.general.me2dc_fast
{
warned = true;
warn!("config reload: general.me2dc_fallback changed; restart required");
warn!("config reload: general.me2dc_fallback/me2dc_fast changed; restart required");
}
if old.general.proxy_config_v4_cache_path != new.general.proxy_config_v4_cache_path
|| old.general.proxy_config_v6_cache_path != new.general.proxy_config_v6_cache_path

View File

@ -346,6 +346,12 @@ impl ProxyConfig {
));
}
if config.timeouts.tg_connect == 0 {
return Err(ProxyError::Config(
"timeouts.tg_connect must be > 0".to_string(),
));
}
if config.general.upstream_unhealthy_fail_threshold == 0 {
return Err(ProxyError::Config(
"general.upstream_unhealthy_fail_threshold must be > 0".to_string(),
@ -1217,6 +1223,7 @@ mod tests {
default_me_init_retry_attempts()
);
assert_eq!(cfg.general.me2dc_fallback, default_me2dc_fallback());
assert_eq!(cfg.general.me2dc_fast, default_me2dc_fast());
assert_eq!(
cfg.general.proxy_config_v4_cache_path,
default_proxy_config_v4_cache_path()
@ -1356,6 +1363,7 @@ mod tests {
default_me_init_retry_attempts()
);
assert_eq!(general.me2dc_fallback, default_me2dc_fallback());
assert_eq!(general.me2dc_fast, default_me2dc_fast());
assert_eq!(
general.proxy_config_v4_cache_path,
default_proxy_config_v4_cache_path()
@ -1905,6 +1913,26 @@ mod tests {
let _ = std::fs::remove_file(path);
}
#[test]
fn tg_connect_zero_is_rejected() {
let toml = r#"
[timeouts]
tg_connect = 0
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_tg_connect_zero_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(err.contains("timeouts.tg_connect must be > 0"));
let _ = std::fs::remove_file(path);
}
#[test]
fn rpc_proxy_req_every_out_of_range_is_rejected() {
let toml = r#"

View File

@ -429,6 +429,11 @@ pub struct GeneralConfig {
#[serde(default = "default_me2dc_fallback")]
pub me2dc_fallback: bool,
/// Fast ME->Direct fallback mode for new sessions.
/// Active only when both `use_middle_proxy=true` and `me2dc_fallback=true`.
#[serde(default = "default_me2dc_fast")]
pub me2dc_fast: bool,
/// Enable ME keepalive padding frames.
#[serde(default = "default_true")]
pub me_keepalive_enabled: bool,
@ -939,6 +944,7 @@ impl Default for GeneralConfig {
middle_proxy_warm_standby: default_middle_proxy_warm_standby(),
me_init_retry_attempts: default_me_init_retry_attempts(),
me2dc_fallback: default_me2dc_fallback(),
me2dc_fast: default_me2dc_fast(),
me_keepalive_enabled: default_true(),
me_keepalive_interval_secs: default_keepalive_interval(),
me_keepalive_jitter_secs: default_keepalive_jitter(),
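
A compact restatement of the gating rule from the doc comment above: me2dc_fast is inert unless middle-proxy routing and the strict fallback are both enabled. A hedged sketch, with the flags passed in directly rather than read from GeneralConfig:

// Mirrors the fast_fallback_enabled computation in build_runtime_gates_data.
fn fast_fallback_enabled(use_middle_proxy: bool, me2dc_fallback: bool, me2dc_fast: bool) -> bool {
    use_middle_proxy && me2dc_fallback && me2dc_fast
}

// me2dc_fast alone changes nothing:
// assert!(!fast_fallback_enabled(false, false, true));
// assert!(fast_fallback_enabled(true, true, true));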

View File

@ -21,10 +21,29 @@ pub(crate) async fn configure_admission_gate(
if config.general.use_middle_proxy {
if let Some(pool) = me_pool.as_ref() {
let initial_ready = pool.admission_ready_conditional_cast().await;
admission_tx.send_replace(initial_ready);
let _ = route_runtime.set_mode(RelayRouteMode::Middle);
let mut fallback_enabled = config.general.me2dc_fallback;
let mut fast_fallback_enabled = fallback_enabled && config.general.me2dc_fast;
let (initial_gate_open, initial_route_mode, initial_fallback_reason) = if initial_ready
{
(true, RelayRouteMode::Middle, None)
} else if fast_fallback_enabled {
(
true,
RelayRouteMode::Direct,
Some("fast_not_ready_fallback"),
)
} else {
(false, RelayRouteMode::Middle, None)
};
admission_tx.send_replace(initial_gate_open);
let _ = route_runtime.set_mode(initial_route_mode);
if initial_ready {
info!("Conditional-admission gate: open / ME pool READY");
} else if let Some(reason) = initial_fallback_reason {
warn!(
fallback_reason = reason,
"Conditional-admission gate opened in ME fast fallback mode"
);
} else {
warn!("Conditional-admission gate: closed / ME pool is NOT ready)");
}
@ -34,10 +53,9 @@ pub(crate) async fn configure_admission_gate(
let route_runtime_gate = route_runtime.clone();
let mut config_rx_gate = config_rx.clone();
let mut admission_poll_ms = config.general.me_admission_poll_ms.max(1);
let mut fallback_enabled = config.general.me2dc_fallback;
tokio::spawn(async move {
let mut gate_open = initial_ready;
let mut route_mode = RelayRouteMode::Middle;
let mut gate_open = initial_gate_open;
let mut route_mode = initial_route_mode;
let mut ready_observed = initial_ready;
let mut not_ready_since = if initial_ready {
None
@ -53,16 +71,23 @@ pub(crate) async fn configure_admission_gate(
let cfg = config_rx_gate.borrow_and_update().clone();
admission_poll_ms = cfg.general.me_admission_poll_ms.max(1);
fallback_enabled = cfg.general.me2dc_fallback;
fast_fallback_enabled = cfg.general.me2dc_fallback && cfg.general.me2dc_fast;
continue;
}
_ = tokio::time::sleep(Duration::from_millis(admission_poll_ms)) => {}
}
let ready = pool_for_gate.admission_ready_conditional_cast().await;
let now = Instant::now();
let (next_gate_open, next_route_mode, next_fallback_active) = if ready {
let (next_gate_open, next_route_mode, next_fallback_reason) = if ready {
ready_observed = true;
not_ready_since = None;
(true, RelayRouteMode::Middle, false)
(true, RelayRouteMode::Middle, None)
} else if fast_fallback_enabled {
(
true,
RelayRouteMode::Direct,
Some("fast_not_ready_fallback"),
)
} else {
let not_ready_started_at = *not_ready_since.get_or_insert(now);
let not_ready_for = now.saturating_duration_since(not_ready_started_at);
@ -72,11 +97,12 @@ pub(crate) async fn configure_admission_gate(
STARTUP_FALLBACK_AFTER
};
if fallback_enabled && not_ready_for > fallback_after {
(true, RelayRouteMode::Direct, true)
(true, RelayRouteMode::Direct, Some("strict_grace_fallback"))
} else {
(false, RelayRouteMode::Middle, false)
(false, RelayRouteMode::Middle, None)
}
};
let next_fallback_active = next_fallback_reason.is_some();
if next_route_mode != route_mode {
route_mode = next_route_mode;
@ -88,17 +114,28 @@ pub(crate) async fn configure_admission_gate(
"Middle-End routing restored for new sessions"
);
} else {
let fallback_after = if ready_observed {
RUNTIME_FALLBACK_AFTER
let fallback_reason = next_fallback_reason.unwrap_or("unknown");
if fallback_reason == "strict_grace_fallback" {
let fallback_after = if ready_observed {
RUNTIME_FALLBACK_AFTER
} else {
STARTUP_FALLBACK_AFTER
};
warn!(
target_mode = route_mode.as_str(),
cutover_generation = snapshot.generation,
grace_secs = fallback_after.as_secs(),
fallback_reason,
"ME pool stayed not-ready beyond grace; routing new sessions via Direct-DC"
);
} else {
STARTUP_FALLBACK_AFTER
};
warn!(
target_mode = route_mode.as_str(),
cutover_generation = snapshot.generation,
grace_secs = fallback_after.as_secs(),
"ME pool stayed not-ready beyond grace; routing new sessions via Direct-DC"
);
warn!(
target_mode = route_mode.as_str(),
cutover_generation = snapshot.generation,
fallback_reason,
"ME pool not-ready; routing new sessions via Direct-DC (fast mode)"
);
}
}
}
}
@ -108,7 +145,10 @@ pub(crate) async fn configure_admission_gate(
admission_tx_gate.send_replace(gate_open);
if gate_open {
if next_fallback_active {
warn!("Conditional-admission gate opened in ME fallback mode");
warn!(
fallback_reason = next_fallback_reason.unwrap_or("unknown"),
"Conditional-admission gate opened in ME fallback mode"
);
} else {
info!("Conditional-admission gate opened / ME pool READY");
}

View File

@ -277,6 +277,8 @@ pub(crate) async fn initialize_me_pool(
config.general.me_warn_rate_limit_ms,
config.general.me_route_no_writer_mode,
config.general.me_route_no_writer_wait_ms,
config.general.me_route_hybrid_max_wait_ms,
config.general.me_route_blocking_send_timeout_ms,
config.general.me_route_inline_recovery_attempts,
config.general.me_route_inline_recovery_wait_ms,
);

View File

@ -115,15 +115,13 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
);
std::process::exit(1);
}
} else {
if let Err(e) = std::fs::create_dir_all(data_path) {
eprintln!(
"[telemt] Can't create data_path {}: {}",
data_path.display(),
e
);
std::process::exit(1);
}
} else if let Err(e) = std::fs::create_dir_all(data_path) {
eprintln!(
"[telemt] Can't create data_path {}: {}",
data_path.display(),
e
);
std::process::exit(1);
}
if let Err(e) = std::env::set_current_dir(data_path) {
@ -225,6 +223,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
config.general.upstream_connect_retry_attempts,
config.general.upstream_connect_retry_backoff_ms,
config.general.upstream_connect_budget_ms,
config.timeouts.tg_connect,
config.general.upstream_unhealthy_fail_threshold,
config.general.upstream_connect_failfast_hard_errors,
stats.clone(),

View File

@ -1558,6 +1558,40 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_endpoint_quarantine_unexpected_total ME endpoint quarantines caused by unexpected writer removals"
);
let _ = writeln!(
out,
"# TYPE telemt_me_endpoint_quarantine_unexpected_total counter"
);
let _ = writeln!(
out,
"telemt_me_endpoint_quarantine_unexpected_total {}",
if me_allows_normal {
stats.get_me_endpoint_quarantine_unexpected_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_endpoint_quarantine_draining_suppressed_total Draining writer removals that skipped endpoint quarantine"
);
let _ = writeln!(
out,
"# TYPE telemt_me_endpoint_quarantine_draining_suppressed_total counter"
);
let _ = writeln!(
out,
"telemt_me_endpoint_quarantine_draining_suppressed_total {}",
if me_allows_normal {
stats.get_me_endpoint_quarantine_draining_suppressed_total()
} else {
0
}
);
let _ = writeln!(
out,
@ -2318,6 +2352,20 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_hybrid_timeout_total ME hybrid route timeouts after bounded retry window"
);
let _ = writeln!(out, "# TYPE telemt_me_hybrid_timeout_total counter");
let _ = writeln!(
out,
"telemt_me_hybrid_timeout_total {}",
if me_allows_normal {
stats.get_me_hybrid_timeout_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_async_recovery_trigger_total Async ME recovery trigger attempts from route path"
@ -2608,6 +2656,9 @@ mod tests {
stats.increment_me_d2c_write_mode(crate::stats::MeD2cWriteMode::Coalesced);
stats.increment_me_d2c_quota_reject_total(crate::stats::MeD2cQuotaRejectStage::PostWrite);
stats.observe_me_d2c_frame_buf_shrink(4096);
stats.increment_me_endpoint_quarantine_total();
stats.increment_me_endpoint_quarantine_unexpected_total();
stats.increment_me_endpoint_quarantine_draining_suppressed_total();
stats.increment_user_connects("alice");
stats.increment_user_curr_connects("alice");
stats.add_user_octets_from("alice", 1024);
@ -2658,6 +2709,9 @@ mod tests {
assert!(output.contains("telemt_me_d2c_quota_reject_total{stage=\"post_write\"} 1"));
assert!(output.contains("telemt_me_d2c_frame_buf_shrink_total 1"));
assert!(output.contains("telemt_me_d2c_frame_buf_shrink_bytes_total 4096"));
assert!(output.contains("telemt_me_endpoint_quarantine_total 1"));
assert!(output.contains("telemt_me_endpoint_quarantine_unexpected_total 1"));
assert!(output.contains("telemt_me_endpoint_quarantine_draining_suppressed_total 1"));
assert!(output.contains("telemt_user_connections_total{user=\"alice\"} 1"));
assert!(output.contains("telemt_user_connections_current{user=\"alice\"} 1"));
assert!(output.contains("telemt_user_octets_from_client{user=\"alice\"} 1024"));
@ -2724,6 +2778,12 @@ mod tests {
assert!(output.contains("# TYPE telemt_me_d2c_write_mode_total counter"));
assert!(output.contains("# TYPE telemt_me_d2c_batch_frames_bucket_total counter"));
assert!(output.contains("# TYPE telemt_me_d2c_flush_duration_us_bucket_total counter"));
assert!(output.contains("# TYPE telemt_me_endpoint_quarantine_total counter"));
assert!(output.contains("# TYPE telemt_me_endpoint_quarantine_unexpected_total counter"));
assert!(
output
.contains("# TYPE telemt_me_endpoint_quarantine_draining_suppressed_total counter")
);
assert!(output.contains("# TYPE telemt_me_writer_removed_total counter"));
assert!(
output

View File

@ -4,7 +4,7 @@ use std::collections::{BTreeSet, HashMap};
use std::future::Future;
use std::hash::{BuildHasher, Hash};
use std::net::{IpAddr, SocketAddr};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::{Arc, Mutex, OnceLock};
use std::time::{Duration, Instant};
@ -36,7 +36,6 @@ enum C2MeCommand {
const DESYNC_DEDUP_WINDOW: Duration = Duration::from_secs(60);
const DESYNC_DEDUP_MAX_ENTRIES: usize = 65_536;
const DESYNC_DEDUP_PRUNE_SCAN_LIMIT: usize = 1024;
const DESYNC_FULL_CACHE_EMIT_MIN_INTERVAL: Duration = Duration::from_millis(1000);
const DESYNC_ERROR_CLASS: &str = "frame_too_large_crypto_desync";
const C2ME_CHANNEL_CAPACITY_FALLBACK: usize = 128;
@ -46,10 +45,6 @@ const RELAY_IDLE_IO_POLL_MAX: Duration = Duration::from_secs(1);
const TINY_FRAME_DEBT_PER_TINY: u32 = 8;
const TINY_FRAME_DEBT_LIMIT: u32 = 512;
#[cfg(test)]
const C2ME_SEND_TIMEOUT: Duration = Duration::from_millis(50);
#[cfg(not(test))]
const C2ME_SEND_TIMEOUT: Duration = Duration::from_secs(5);
#[cfg(test)]
const RELAY_TEST_STEP_TIMEOUT: Duration = Duration::from_secs(1);
const ME_D2C_FLUSH_BATCH_MAX_FRAMES_MIN: usize = 1;
const ME_D2C_FLUSH_BATCH_MAX_BYTES_MIN: usize = 4096;
@ -57,12 +52,21 @@ const ME_D2C_FRAME_BUF_SHRINK_HYSTERESIS_FACTOR: usize = 2;
const ME_D2C_SINGLE_WRITE_COALESCE_MAX_BYTES: usize = 128 * 1024;
const QUOTA_RESERVE_SPIN_RETRIES: usize = 32;
static DESYNC_DEDUP: OnceLock<DashMap<u64, Instant>> = OnceLock::new();
static DESYNC_DEDUP_PREVIOUS: OnceLock<DashMap<u64, Instant>> = OnceLock::new();
static DESYNC_HASHER: OnceLock<RandomState> = OnceLock::new();
static DESYNC_FULL_CACHE_LAST_EMIT_AT: OnceLock<Mutex<Option<Instant>>> = OnceLock::new();
static DESYNC_DEDUP_EVER_SATURATED: OnceLock<AtomicBool> = OnceLock::new();
static DESYNC_DEDUP_ROTATION_STATE: OnceLock<Mutex<DesyncDedupRotationState>> = OnceLock::new();
// Invariant for async callers:
// this std::sync::Mutex is allowed only because critical sections are short,
// synchronous, and MUST never cross an `.await`.
static RELAY_IDLE_CANDIDATE_REGISTRY: OnceLock<Mutex<RelayIdleCandidateRegistry>> = OnceLock::new();
static RELAY_IDLE_MARK_SEQ: AtomicU64 = AtomicU64::new(0);
#[derive(Default)]
struct DesyncDedupRotationState {
current_started_at: Option<Instant>,
}
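
The invariant comment above is the whole contract. A minimal self-contained sketch of the pattern, with illustrative names:

use std::sync::Mutex;

static COUNTER: Mutex<u64> = Mutex::new(0);

async fn bump_then_yield() {
    // Critical section: short, synchronous, fully scoped. The guard drops
    // at the end of this block, before any await point.
    {
        let mut n = COUNTER.lock().expect("poisoned");
        *n += 1;
    }
    // Holding a std::sync::MutexGuard across this await could stall every
    // task multiplexed onto this executor thread; the scope above prevents it.
    tokio::time::sleep(std::time::Duration::from_millis(1)).await;
}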
struct RelayForensicsState {
trace_id: u64,
conn_id: u64,
@ -95,6 +99,7 @@ fn relay_idle_candidate_registry() -> &'static Mutex<RelayIdleCandidateRegistry>
fn relay_idle_candidate_registry_lock() -> std::sync::MutexGuard<'static, RelayIdleCandidateRegistry>
{
// Keep lock scope narrow and synchronous: callers must drop guard before any `.await`.
let registry = relay_idle_candidate_registry();
match registry.lock() {
Ok(guard) => guard,
@ -312,64 +317,76 @@ fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool {
return true;
}
let dedup = DESYNC_DEDUP.get_or_init(DashMap::new);
let saturated_before = dedup.len() >= DESYNC_DEDUP_MAX_ENTRIES;
let ever_saturated = DESYNC_DEDUP_EVER_SATURATED.get_or_init(|| AtomicBool::new(false));
if saturated_before {
ever_saturated.store(true, Ordering::Relaxed);
}
let dedup_current = DESYNC_DEDUP.get_or_init(DashMap::new);
let dedup_previous = DESYNC_DEDUP_PREVIOUS.get_or_init(DashMap::new);
let rotation_state =
DESYNC_DEDUP_ROTATION_STATE.get_or_init(|| Mutex::new(DesyncDedupRotationState::default()));
if let Some(mut seen_at) = dedup.get_mut(&key) {
if now.duration_since(*seen_at) >= DESYNC_DEDUP_WINDOW {
*seen_at = now;
return true;
let mut state = match rotation_state.lock() {
Ok(guard) => guard,
Err(poisoned) => {
let mut guard = poisoned.into_inner();
*guard = DesyncDedupRotationState::default();
rotation_state.clear_poison();
guard
}
return false;
}
if dedup.len() >= DESYNC_DEDUP_MAX_ENTRIES {
let mut stale_keys = Vec::new();
let mut oldest_candidate: Option<(u64, Instant)> = None;
for entry in dedup.iter().take(DESYNC_DEDUP_PRUNE_SCAN_LIMIT) {
let key = *entry.key();
let seen_at = *entry.value();
match oldest_candidate {
Some((_, oldest_seen)) if seen_at >= oldest_seen => {}
_ => oldest_candidate = Some((key, seen_at)),
}
if now.duration_since(seen_at) >= DESYNC_DEDUP_WINDOW {
stale_keys.push(*entry.key());
}
}
for stale_key in stale_keys {
dedup.remove(&stale_key);
}
if dedup.len() >= DESYNC_DEDUP_MAX_ENTRIES {
let Some((evict_key, _)) = oldest_candidate else {
return false;
};
dedup.remove(&evict_key);
dedup.insert(key, now);
return should_emit_full_desync_full_cache(now);
}
}
dedup.insert(key, now);
let saturated_after = dedup.len() >= DESYNC_DEDUP_MAX_ENTRIES;
// Preserve the first sequential insert that reaches capacity as a normal
// emit, while still gating concurrent newcomer churn after the cache has
// ever been observed at saturation.
let was_ever_saturated = if saturated_after {
ever_saturated.swap(true, Ordering::Relaxed)
} else {
ever_saturated.load(Ordering::Relaxed)
};
if saturated_before || (saturated_after && was_ever_saturated) {
let rotate_now = match state.current_started_at {
Some(current_started_at) => match now.checked_duration_since(current_started_at) {
Some(elapsed) => elapsed >= DESYNC_DEDUP_WINDOW,
None => true,
},
None => true,
};
if rotate_now {
dedup_previous.clear();
for entry in dedup_current.iter() {
dedup_previous.insert(*entry.key(), *entry.value());
}
dedup_current.clear();
state.current_started_at = Some(now);
}
if let Some(seen_at) = dedup_current.get(&key).map(|entry| *entry.value()) {
let within_window = match now.checked_duration_since(seen_at) {
Some(elapsed) => elapsed < DESYNC_DEDUP_WINDOW,
None => true,
};
if within_window {
return false;
}
dedup_current.insert(key, now);
return true;
}
if let Some(seen_at) = dedup_previous.get(&key).map(|entry| *entry.value()) {
let within_window = match now.checked_duration_since(seen_at) {
Some(elapsed) => elapsed < DESYNC_DEDUP_WINDOW,
None => true,
};
if within_window {
// Keep the original timestamp when promoting from previous bucket,
// so dedup expiry remains tied to first-seen time.
dedup_current.insert(key, seen_at);
return false;
}
dedup_previous.remove(&key);
}
if dedup_current.len() >= DESYNC_DEDUP_MAX_ENTRIES {
// Bounded eviction path: rotate buckets instead of scanning/evicting
// arbitrary entries from a saturated single map.
dedup_previous.clear();
for entry in dedup_current.iter() {
dedup_previous.insert(*entry.key(), *entry.value());
}
dedup_current.clear();
state.current_started_at = Some(now);
dedup_current.insert(key, now);
should_emit_full_desync_full_cache(now)
} else {
dedup_current.insert(key, now);
true
}
}
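
The shape of the rotation scheme is easier to see without DashMap and the full-cache rate limiting. A single-threaded sketch using std HashMap, merging the time-based and saturation-based rotations into one branch; promotion from the previous bucket keeps the first-seen timestamp so expiry stays tied to first sighting:

use std::collections::HashMap;
use std::time::{Duration, Instant};

const WINDOW: Duration = Duration::from_secs(60);
const MAX_ENTRIES: usize = 65_536;

#[derive(Default)]
struct RotatingDedup {
    current: HashMap<u64, Instant>,
    previous: HashMap<u64, Instant>,
    current_started_at: Option<Instant>,
}

impl RotatingDedup {
    // Returns true when the key should be emitted (not seen within WINDOW).
    fn should_emit(&mut self, key: u64, now: Instant) -> bool {
        let window_elapsed = self
            .current_started_at
            .map_or(true, |t| now.duration_since(t) >= WINDOW);
        if window_elapsed || self.current.len() >= MAX_ENTRIES {
            // Rotation doubles as bounded eviction: drop the older bucket
            // wholesale instead of scanning a saturated map for stale keys.
            self.previous = std::mem::take(&mut self.current);
            self.current_started_at = Some(now);
        }
        if let Some(&seen) = self.current.get(&key) {
            if now.duration_since(seen) < WINDOW {
                return false;
            }
            self.current.insert(key, now);
            return true;
        }
        if let Some(&seen) = self.previous.get(&key) {
            if now.duration_since(seen) < WINDOW {
                // Promote with the original timestamp.
                self.current.insert(key, seen);
                return false;
            }
            self.previous.remove(&key);
        }
        self.current.insert(key, now);
        true
    }
}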
@ -405,8 +422,20 @@ fn clear_desync_dedup_for_testing() {
if let Some(dedup) = DESYNC_DEDUP.get() {
dedup.clear();
}
if let Some(ever_saturated) = DESYNC_DEDUP_EVER_SATURATED.get() {
ever_saturated.store(false, Ordering::Relaxed);
if let Some(dedup_previous) = DESYNC_DEDUP_PREVIOUS.get() {
dedup_previous.clear();
}
if let Some(rotation_state) = DESYNC_DEDUP_ROTATION_STATE.get() {
match rotation_state.lock() {
Ok(mut guard) => {
*guard = DesyncDedupRotationState::default();
}
Err(poisoned) => {
let mut guard = poisoned.into_inner();
*guard = DesyncDedupRotationState::default();
rotation_state.clear_poison();
}
}
}
if let Some(last_emit_at) = DESYNC_FULL_CACHE_LAST_EMIT_AT.get() {
match last_emit_at.lock() {
@ -615,6 +644,7 @@ pub(crate) fn relay_idle_pressure_test_scope() -> std::sync::MutexGuard<'static,
async fn enqueue_c2me_command(
tx: &mpsc::Sender<C2MeCommand>,
cmd: C2MeCommand,
send_timeout: Option<Duration>,
) -> std::result::Result<(), mpsc::error::SendError<C2MeCommand>> {
match tx.try_send(cmd) {
Ok(()) => Ok(()),
@ -625,12 +655,18 @@ async fn enqueue_c2me_command(
if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS {
tokio::task::yield_now().await;
}
match timeout(C2ME_SEND_TIMEOUT, tx.reserve()).await {
Ok(Ok(permit)) => {
let reserve_result = match send_timeout {
Some(send_timeout) => match timeout(send_timeout, tx.reserve()).await {
Ok(result) => result,
Err(_) => return Err(mpsc::error::SendError(cmd)),
},
None => tx.reserve().await,
};
match reserve_result {
Ok(permit) => {
permit.send(cmd);
Ok(())
}
Ok(Err(_)) => Err(mpsc::error::SendError(cmd)),
Err(_) => Err(mpsc::error::SendError(cmd)),
}
}
@ -756,6 +792,10 @@ where
.general
.me_c2me_channel_capacity
.max(C2ME_CHANNEL_CAPACITY_FALLBACK);
let c2me_send_timeout = match config.general.me_c2me_send_timeout_ms {
0 => None,
timeout_ms => Some(Duration::from_millis(timeout_ms)),
};
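
The 0-disables convention above, in isolation (me_c2me_send_timeout_ms is the config key from this diff; the helper is illustrative):

use std::time::Duration;

// None means unbounded: enqueue_c2me_command then awaits tx.reserve()
// without a deadline instead of failing the send after a timeout.
fn c2me_send_timeout(ms: u64) -> Option<Duration> {
    (ms != 0).then(|| Duration::from_millis(ms))
}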
let (c2me_tx, mut c2me_rx) = mpsc::channel::<C2MeCommand>(c2me_channel_capacity);
let me_pool_c2me = me_pool.clone();
let c2me_sender = tokio::spawn(async move {
@ -1132,7 +1172,7 @@ where
user = %user,
"Middle-relay pressure eviction for idle-candidate session"
);
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close, c2me_send_timeout).await;
main_result = Err(ProxyError::Proxy(
"middle-relay session evicted under pressure (idle-candidate)".to_string(),
));
@ -1151,7 +1191,7 @@ where
"Cutover affected middle session, closing client connection"
);
tokio::time::sleep(delay).await;
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close, c2me_send_timeout).await;
main_result = Err(ProxyError::Proxy(ROUTE_SWITCH_ERROR_MSG.to_string()));
break;
}
@ -1209,8 +1249,12 @@ where
flags |= RPC_FLAG_NOT_ENCRYPTED;
}
// Keep client read loop lightweight: route heavy ME send path via a dedicated task.
if enqueue_c2me_command(&c2me_tx, C2MeCommand::Data { payload, flags })
.await
if enqueue_c2me_command(
&c2me_tx,
C2MeCommand::Data { payload, flags },
c2me_send_timeout,
)
.await
.is_err()
{
main_result = Err(ProxyError::Proxy("ME sender channel closed".into()));
@ -1220,7 +1264,9 @@ where
Ok(None) => {
debug!(conn_id, "Client EOF");
client_closed = true;
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
let _ =
enqueue_c2me_command(&c2me_tx, C2MeCommand::Close, c2me_send_timeout)
.await;
break;
}
Err(e) => {

View File

@ -126,6 +126,7 @@ async fn c2me_channel_full_path_yields_then_sends() {
payload: make_pooled_payload(&[0xBB, 0xCC]),
flags: 2,
},
None,
)
.await
});

View File

@ -128,6 +128,8 @@ pub struct Stats {
me_crc_mismatch: AtomicU64,
me_seq_mismatch: AtomicU64,
me_endpoint_quarantine_total: AtomicU64,
me_endpoint_quarantine_unexpected_total: AtomicU64,
me_endpoint_quarantine_draining_suppressed_total: AtomicU64,
me_kdf_drift_total: AtomicU64,
me_kdf_port_only_drift_total: AtomicU64,
me_hardswap_pending_reuse_total: AtomicU64,
@ -234,6 +236,7 @@ pub struct Stats {
me_writer_restored_same_endpoint_total: AtomicU64,
me_writer_restored_fallback_total: AtomicU64,
me_no_writer_failfast_total: AtomicU64,
me_hybrid_timeout_total: AtomicU64,
me_async_recovery_trigger_total: AtomicU64,
me_inline_recovery_total: AtomicU64,
ip_reservation_rollback_tcp_limit_total: AtomicU64,
@ -1203,6 +1206,11 @@ impl Stats {
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_hybrid_timeout_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_hybrid_timeout_total.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_async_recovery_trigger_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_async_recovery_trigger_total
@ -1245,6 +1253,18 @@ impl Stats {
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_endpoint_quarantine_unexpected_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_endpoint_quarantine_unexpected_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_endpoint_quarantine_draining_suppressed_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_endpoint_quarantine_draining_suppressed_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_kdf_drift_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_kdf_drift_total.fetch_add(1, Ordering::Relaxed);
@ -1497,6 +1517,14 @@ impl Stats {
pub fn get_me_endpoint_quarantine_total(&self) -> u64 {
self.me_endpoint_quarantine_total.load(Ordering::Relaxed)
}
pub fn get_me_endpoint_quarantine_unexpected_total(&self) -> u64 {
self.me_endpoint_quarantine_unexpected_total
.load(Ordering::Relaxed)
}
pub fn get_me_endpoint_quarantine_draining_suppressed_total(&self) -> u64 {
self.me_endpoint_quarantine_draining_suppressed_total
.load(Ordering::Relaxed)
}
pub fn get_me_kdf_drift_total(&self) -> u64 {
self.me_kdf_drift_total.load(Ordering::Relaxed)
}
@ -1876,6 +1904,9 @@ impl Stats {
pub fn get_me_no_writer_failfast_total(&self) -> u64 {
self.me_no_writer_failfast_total.load(Ordering::Relaxed)
}
pub fn get_me_hybrid_timeout_total(&self) -> u64 {
self.me_hybrid_timeout_total.load(Ordering::Relaxed)
}
pub fn get_me_async_recovery_trigger_total(&self) -> u64 {
self.me_async_recovery_trigger_total.load(Ordering::Relaxed)
}

View File

@ -244,10 +244,9 @@ fn order_profiles(
if let Some(pos) = ordered
.iter()
.position(|profile| *profile == cached.profile)
&& pos != 0
{
if pos != 0 {
ordered.swap(0, pos);
}
ordered.swap(0, pos);
}
}

View File

@ -314,53 +314,6 @@ async fn run_update_cycle(
reinit_tx: &mpsc::Sender<MeReinitTrigger>,
) {
let upstream = pool.upstream.clone();
pool.update_runtime_reinit_policy(
cfg.general.hardswap,
cfg.general.me_pool_drain_ttl_secs,
cfg.general.me_instadrain,
cfg.general.me_pool_drain_threshold,
cfg.general.me_pool_drain_soft_evict_enabled,
cfg.general.me_pool_drain_soft_evict_grace_secs,
cfg.general.me_pool_drain_soft_evict_per_writer,
cfg.general.me_pool_drain_soft_evict_budget_per_core,
cfg.general.me_pool_drain_soft_evict_cooldown_ms,
cfg.general.effective_me_pool_force_close_secs(),
cfg.general.me_pool_min_fresh_ratio,
cfg.general.me_hardswap_warmup_delay_min_ms,
cfg.general.me_hardswap_warmup_delay_max_ms,
cfg.general.me_hardswap_warmup_extra_passes,
cfg.general.me_hardswap_warmup_pass_backoff_base_ms,
cfg.general.me_bind_stale_mode,
cfg.general.me_bind_stale_ttl_secs,
cfg.general.me_secret_atomic_snapshot,
cfg.general.me_deterministic_writer_sort,
cfg.general.me_writer_pick_mode,
cfg.general.me_writer_pick_sample_size,
cfg.general.me_single_endpoint_shadow_writers,
cfg.general.me_single_endpoint_outage_mode_enabled,
cfg.general.me_single_endpoint_outage_disable_quarantine,
cfg.general.me_single_endpoint_outage_backoff_min_ms,
cfg.general.me_single_endpoint_outage_backoff_max_ms,
cfg.general.me_single_endpoint_shadow_rotate_every_secs,
cfg.general.me_floor_mode,
cfg.general.me_adaptive_floor_idle_secs,
cfg.general.me_adaptive_floor_min_writers_single_endpoint,
cfg.general.me_adaptive_floor_min_writers_multi_endpoint,
cfg.general.me_adaptive_floor_recover_grace_secs,
cfg.general.me_adaptive_floor_writers_per_core_total,
cfg.general.me_adaptive_floor_cpu_cores_override,
cfg.general
.me_adaptive_floor_max_extra_writers_single_per_core,
cfg.general
.me_adaptive_floor_max_extra_writers_multi_per_core,
cfg.general.me_adaptive_floor_max_active_writers_per_core,
cfg.general.me_adaptive_floor_max_warm_writers_per_core,
cfg.general.me_adaptive_floor_max_active_writers_global,
cfg.general.me_adaptive_floor_max_warm_writers_global,
cfg.general.me_health_interval_ms_unhealthy,
cfg.general.me_health_interval_ms_healthy,
cfg.general.me_warn_rate_limit_ms,
);
let required_cfg_snapshots = cfg.general.me_config_stable_snapshots.max(1);
let required_secret_snapshots = cfg.general.proxy_secret_stable_snapshots.max(1);

View File

@ -161,7 +161,7 @@ impl MePool {
} else {
let connect_fut = async {
if addr.is_ipv6()
&& let Some(v6) = self.detected_ipv6
&& let Some(v6) = self.nat_runtime.detected_ipv6
{
match TcpSocket::new_v6() {
Ok(sock) => {
@ -305,7 +305,7 @@ impl MePool {
}
MeSocksKdfPolicy::Compat => {
self.stats.increment_me_socks_kdf_compat_fallback();
if self.nat_probe {
if self.nat_runtime.nat_probe {
let bind_ip = Self::direct_bind_ip_for_stun(family, upstream_egress);
self.maybe_reflect_public_addr(family, bind_ip).await
} else {
@ -313,7 +313,7 @@ impl MePool {
}
}
}
} else if self.nat_probe {
} else if self.nat_runtime.nat_probe {
let bind_ip = Self::direct_bind_ip_for_stun(family, upstream_egress);
self.maybe_reflect_public_addr(family, bind_ip).await
} else {
@ -343,7 +343,10 @@ impl MePool {
.unwrap_or_default()
.as_secs() as u32;
let secret_atomic_snapshot = self.secret_atomic_snapshot.load(Ordering::Relaxed);
let secret_atomic_snapshot = self
.writer_selection_policy
.secret_atomic_snapshot
.load(Ordering::Relaxed);
let (ks, secret) = if secret_atomic_snapshot {
let snapshot = self.secret_snapshot().await;
(snapshot.key_selector, snapshot.secret)

View File

@ -7,6 +7,8 @@ use std::sync::Arc;
use std::time::{Duration, Instant};
use rand::RngExt;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;
use tracing::{debug, info, warn};
use crate::config::MeFloorMode;
@ -14,6 +16,7 @@ use crate::crypto::SecureRandom;
use crate::network::IpFamily;
use super::MePool;
use super::pool::MeFamilyRuntimeState;
const JITTER_FRAC_NUM: u64 = 2; // jitter up to 50% of backoff
#[allow(dead_code)]
@ -27,6 +30,9 @@ const HEALTH_RECONNECT_BUDGET_PER_CORE: usize = 2;
const HEALTH_RECONNECT_BUDGET_PER_DC: usize = 1;
const HEALTH_RECONNECT_BUDGET_MIN: usize = 4;
const HEALTH_RECONNECT_BUDGET_MAX: usize = 128;
const FAMILY_SUPPRESS_FAIL_STREAK_THRESHOLD: u32 = 5;
const FAMILY_SUPPRESS_DURATION_SECS: u64 = 60;
const FAMILY_RECOVER_SUCCESS_STREAK_TARGET: u32 = 2;
const HEALTH_DRAIN_CLOSE_BUDGET_PER_CORE: usize = 16;
const HEALTH_DRAIN_CLOSE_BUDGET_MIN: usize = 16;
const HEALTH_DRAIN_CLOSE_BUDGET_MAX: usize = 256;
@ -56,6 +62,17 @@ struct FamilyFloorPlan {
target_writers_total: usize,
}
#[derive(Debug)]
struct FamilyReconnectOutcome {
key: (i32, IpFamily),
dc: i32,
family: IpFamily,
alive: usize,
required: usize,
endpoint_count: usize,
restored: usize,
}
pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_connections: usize) {
let mut backoff: HashMap<(i32, IpFamily), u64> = HashMap::new();
let mut next_attempt: HashMap<(i32, IpFamily), Instant> = HashMap::new();
@ -78,6 +95,7 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
};
tokio::time::sleep(interval).await;
pool.prune_closed_writers().await;
pool.sweep_endpoint_quarantine().await;
reap_draining_writers(&pool, &mut drain_warn_next_allowed).await;
let v4_degraded = check_family(
IpFamily::V4,
@ -113,6 +131,8 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
&mut floor_warn_next_allowed,
)
.await;
update_family_runtime_state(&pool, IpFamily::V4, v4_degraded);
update_family_runtime_state(&pool, IpFamily::V6, v6_degraded);
degraded_interval = v4_degraded || v6_degraded;
}
}
@ -135,9 +155,11 @@ pub(super) async fn reap_draining_writers(
let now_epoch_secs = MePool::now_epoch_secs();
let now = Instant::now();
let drain_ttl_secs = pool
.drain_runtime
.me_pool_drain_ttl_secs
.load(std::sync::atomic::Ordering::Relaxed);
let drain_threshold = pool
.drain_runtime
.me_pool_drain_threshold
.load(std::sync::atomic::Ordering::Relaxed);
let activity = pool.registry.writer_activity_snapshot().await;
@ -221,7 +243,10 @@ pub(super) async fn reap_draining_writers(
endpoint = %writer.addr,
generation = writer.generation,
drain_ttl_secs,
force_close_secs = pool.me_pool_force_close_secs.load(std::sync::atomic::Ordering::Relaxed),
force_close_secs = pool
.drain_runtime
.me_pool_force_close_secs
.load(std::sync::atomic::Ordering::Relaxed),
allow_drain_fallback = writer.allow_drain_fallback,
"ME draining writer remains non-empty past drain TTL"
);
@ -365,7 +390,8 @@ async fn check_family(
endpoints.sort_unstable();
endpoints.dedup();
}
let mut reconnect_budget = health_reconnect_budget(pool, dc_endpoints.len());
let reconnect_budget = health_reconnect_budget(pool, dc_endpoints.len());
let reconnect_sem = Arc::new(Semaphore::new(reconnect_budget));
if pool.floor_mode() == MeFloorMode::Static {
adaptive_idle_since.clear();
@ -422,6 +448,10 @@ async fn check_family(
floor_plan.active_writers_current,
floor_plan.warm_writers_current,
);
let live_writer_ids_by_addr = Arc::new(live_writer_ids_by_addr);
let writer_idle_since = Arc::new(writer_idle_since);
let bound_clients_by_writer = Arc::new(bound_clients_by_writer);
let mut reconnect_set = JoinSet::<FamilyReconnectOutcome>::new();
for (dc, endpoints) in dc_endpoints {
if endpoints.is_empty() {
@ -461,7 +491,7 @@ async fn check_family(
required,
outage_backoff,
outage_next_attempt,
&mut reconnect_budget,
&reconnect_sem,
)
.await;
continue;
@ -495,9 +525,9 @@ async fn check_family(
&endpoints,
alive,
required,
&live_writer_ids_by_addr,
&writer_idle_since,
&bound_clients_by_writer,
live_writer_ids_by_addr.as_ref(),
writer_idle_since.as_ref(),
bound_clients_by_writer.as_ref(),
idle_refresh_next_attempt,
)
.await;
@ -510,8 +540,8 @@ async fn check_family(
&endpoints,
alive,
required,
&live_writer_ids_by_addr,
&bound_clients_by_writer,
live_writer_ids_by_addr.as_ref(),
bound_clients_by_writer.as_ref(),
shadow_rotate_deadline,
)
.await;
@ -521,8 +551,8 @@ async fn check_family(
family_degraded = true;
let now = Instant::now();
if reconnect_budget == 0 {
let base_ms = pool.me_reconnect_backoff_base.as_millis() as u64;
if reconnect_sem.available_permits() == 0 {
let base_ms = pool.reconnect_runtime.me_reconnect_backoff_base.as_millis() as u64;
let next_ms = (*backoff.get(&key).unwrap_or(&base_ms)).max(base_ms);
let jitter = next_ms / JITTER_FRAC_NUM;
let wait = Duration::from_millis(next_ms)
@ -545,7 +575,10 @@ async fn check_family(
continue;
}
let max_concurrent = pool.me_reconnect_max_concurrent_per_dc.max(1) as usize;
let max_concurrent = pool
.reconnect_runtime
.me_reconnect_max_concurrent_per_dc
.max(1) as usize;
if *inflight.get(&key).unwrap_or(&0) >= max_concurrent {
continue;
}
@ -564,117 +597,165 @@ async fn check_family(
continue;
}
*inflight.entry(key).or_insert(0) += 1;
let mut restored = 0usize;
for _ in 0..missing {
if reconnect_budget == 0 {
break;
}
reconnect_budget = reconnect_budget.saturating_sub(1);
if pool.active_contour_writer_count_total().await
>= floor_plan.active_cap_effective_total
{
let swapped = maybe_swap_idle_writer_for_cap(
pool,
rng,
dc,
family,
&endpoints,
&live_writer_ids_by_addr,
&writer_idle_since,
&bound_clients_by_writer,
let pool_for_reconnect = pool.clone();
let rng_for_reconnect = rng.clone();
let reconnect_sem_for_dc = reconnect_sem.clone();
let endpoints_for_dc = endpoints.clone();
let live_writer_ids_by_addr_for_dc = live_writer_ids_by_addr.clone();
let writer_idle_since_for_dc = writer_idle_since.clone();
let bound_clients_by_writer_for_dc = bound_clients_by_writer.clone();
let active_cap_effective_total = floor_plan.active_cap_effective_total;
reconnect_set.spawn(async move {
let mut restored = 0usize;
for _ in 0..missing {
let Ok(reconnect_permit) = reconnect_sem_for_dc.clone().try_acquire_owned() else {
break;
};
if pool_for_reconnect.active_contour_writer_count_total().await
>= active_cap_effective_total
{
let swapped = maybe_swap_idle_writer_for_cap(
&pool_for_reconnect,
&rng_for_reconnect,
dc,
family,
&endpoints_for_dc,
live_writer_ids_by_addr_for_dc.as_ref(),
writer_idle_since_for_dc.as_ref(),
bound_clients_by_writer_for_dc.as_ref(),
)
.await;
if swapped {
pool_for_reconnect
.stats
.increment_me_floor_swap_idle_total();
restored += 1;
continue;
}
pool_for_reconnect
.stats
.increment_me_floor_cap_block_total();
pool_for_reconnect
.stats
.increment_me_floor_swap_idle_failed_total();
debug!(
dc = %dc,
?family,
alive,
required,
active_cap_effective_total,
"Adaptive floor cap reached, reconnect attempt blocked"
);
break;
}
let res = tokio::time::timeout(
pool_for_reconnect.reconnect_runtime.me_one_timeout,
pool_for_reconnect.connect_endpoints_round_robin(
dc,
&endpoints_for_dc,
rng_for_reconnect.as_ref(),
),
)
.await;
if swapped {
pool.stats.increment_me_floor_swap_idle_total();
restored += 1;
continue;
match res {
Ok(true) => {
restored += 1;
pool_for_reconnect.stats.increment_me_reconnect_success();
}
Ok(false) => {
pool_for_reconnect.stats.increment_me_reconnect_attempt();
debug!(dc = %dc, ?family, "ME round-robin reconnect failed")
}
Err(_) => {
pool_for_reconnect.stats.increment_me_reconnect_attempt();
debug!(dc = %dc, ?family, "ME reconnect timed out");
}
}
pool.stats.increment_me_floor_cap_block_total();
pool.stats.increment_me_floor_swap_idle_failed_total();
debug!(
dc = %dc,
?family,
alive,
required,
active_cap_effective_total = floor_plan.active_cap_effective_total,
"Adaptive floor cap reached, reconnect attempt blocked"
);
break;
drop(reconnect_permit);
}
let res = tokio::time::timeout(
pool.me_one_timeout,
pool.connect_endpoints_round_robin(dc, &endpoints, rng.as_ref()),
)
.await;
match res {
Ok(true) => {
restored += 1;
pool.stats.increment_me_reconnect_success();
}
Ok(false) => {
pool.stats.increment_me_reconnect_attempt();
debug!(dc = %dc, ?family, "ME round-robin reconnect failed")
}
Err(_) => {
pool.stats.increment_me_reconnect_attempt();
debug!(dc = %dc, ?family, "ME reconnect timed out");
}
}
}
let now_alive = alive + restored;
if now_alive >= required {
info!(
dc = %dc,
?family,
alive = now_alive,
FamilyReconnectOutcome {
key,
dc,
family,
alive,
required,
endpoint_count = endpoints.len(),
endpoint_count: endpoints_for_dc.len(),
restored,
}
});
}
while let Some(joined) = reconnect_set.join_next().await {
let outcome = match joined {
Ok(outcome) => outcome,
Err(join_error) => {
debug!(error = %join_error, "Health reconnect task failed");
continue;
}
};
let now = Instant::now();
let now_alive = outcome.alive + outcome.restored;
if now_alive >= outcome.required {
info!(
dc = %outcome.dc,
family = ?outcome.family,
alive = now_alive,
required = outcome.required,
endpoint_count = outcome.endpoint_count,
"ME writer floor restored for DC"
);
backoff.insert(key, pool.me_reconnect_backoff_base.as_millis() as u64);
let jitter = pool.me_reconnect_backoff_base.as_millis() as u64 / JITTER_FRAC_NUM;
let wait = pool.me_reconnect_backoff_base
backoff.insert(
outcome.key,
pool.reconnect_runtime.me_reconnect_backoff_base.as_millis() as u64,
);
let jitter = pool.reconnect_runtime.me_reconnect_backoff_base.as_millis() as u64
/ JITTER_FRAC_NUM;
let wait = pool.reconnect_runtime.me_reconnect_backoff_base
+ Duration::from_millis(rand::rng().random_range(0..=jitter.max(1)));
next_attempt.insert(key, now + wait);
next_attempt.insert(outcome.key, now + wait);
} else {
let curr = *backoff
.get(&key)
.unwrap_or(&(pool.me_reconnect_backoff_base.as_millis() as u64));
let next_ms =
(curr.saturating_mul(2)).min(pool.me_reconnect_backoff_cap.as_millis() as u64);
backoff.insert(key, next_ms);
.get(&outcome.key)
.unwrap_or(&(pool.reconnect_runtime.me_reconnect_backoff_base.as_millis() as u64));
let next_ms = (curr.saturating_mul(2))
.min(pool.reconnect_runtime.me_reconnect_backoff_cap.as_millis() as u64);
backoff.insert(outcome.key, next_ms);
let jitter = next_ms / JITTER_FRAC_NUM;
let wait = Duration::from_millis(next_ms)
+ Duration::from_millis(rand::rng().random_range(0..=jitter.max(1)));
next_attempt.insert(key, now + wait);
next_attempt.insert(outcome.key, now + wait);
if pool.is_runtime_ready() {
let warn_cooldown = pool.warn_rate_limit_duration();
if should_emit_rate_limited_warn(floor_warn_next_allowed, key, now, warn_cooldown) {
if should_emit_rate_limited_warn(
floor_warn_next_allowed,
outcome.key,
now,
warn_cooldown,
) {
warn!(
dc = %dc,
?family,
dc = %outcome.dc,
family = ?outcome.family,
alive = now_alive,
required,
endpoint_count = endpoints.len(),
required = outcome.required,
endpoint_count = outcome.endpoint_count,
backoff_ms = next_ms,
"DC writer floor is below required level, scheduled reconnect"
);
}
} else {
info!(
dc = %dc,
?family,
dc = %outcome.dc,
family = ?outcome.family,
alive = now_alive,
required,
endpoint_count = endpoints.len(),
required = outcome.required,
endpoint_count = outcome.endpoint_count,
backoff_ms = next_ms,
"DC writer floor is below required level during startup, scheduled reconnect"
);
}
}
if let Some(v) = inflight.get_mut(&key) {
if let Some(v) = inflight.get_mut(&outcome.key) {
*v = v.saturating_sub(1);
}
}
@ -691,6 +772,68 @@ fn health_reconnect_budget(pool: &Arc<MePool>, dc_groups: usize) -> usize {
.clamp(HEALTH_RECONNECT_BUDGET_MIN, HEALTH_RECONNECT_BUDGET_MAX)
}
fn update_family_runtime_state(pool: &Arc<MePool>, family: IpFamily, degraded: bool) {
let now_epoch_secs = MePool::now_epoch_secs();
let previous_state = pool.family_runtime_state(family);
let mut state_since_epoch_secs = pool.family_runtime_state_since_epoch_secs(family);
let previous_suppressed_until_epoch_secs = pool.family_suppressed_until_epoch_secs(family);
let previous_fail_streak = pool.family_fail_streak(family);
let previous_recover_success_streak = pool.family_recover_success_streak(family);
let (next_state, suppressed_until_epoch_secs, fail_streak, recover_success_streak) =
if previous_suppressed_until_epoch_secs > now_epoch_secs {
let fail_streak = if degraded {
previous_fail_streak.saturating_add(1)
} else {
previous_fail_streak
};
(
MeFamilyRuntimeState::Suppressed,
previous_suppressed_until_epoch_secs,
fail_streak,
0,
)
} else if degraded {
let fail_streak = previous_fail_streak.saturating_add(1);
if fail_streak >= FAMILY_SUPPRESS_FAIL_STREAK_THRESHOLD {
(
MeFamilyRuntimeState::Suppressed,
now_epoch_secs.saturating_add(FAMILY_SUPPRESS_DURATION_SECS),
fail_streak,
0,
)
} else {
(MeFamilyRuntimeState::Degraded, 0, fail_streak, 0)
}
} else if matches!(previous_state, MeFamilyRuntimeState::Healthy) {
(MeFamilyRuntimeState::Healthy, 0, 0, 0)
} else {
let recover_success_streak = previous_recover_success_streak.saturating_add(1);
if recover_success_streak >= FAMILY_RECOVER_SUCCESS_STREAK_TARGET {
(MeFamilyRuntimeState::Healthy, 0, 0, 0)
} else {
(
MeFamilyRuntimeState::Recovering,
0,
0,
recover_success_streak,
)
}
};
if next_state != previous_state || state_since_epoch_secs == 0 {
state_since_epoch_secs = now_epoch_secs;
}
pool.set_family_runtime_state(
family,
next_state,
state_since_epoch_secs,
suppressed_until_epoch_secs,
fail_streak,
recover_success_streak,
);
}
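
Reduced to its transitions, the hysteresis above is a four-state machine: 5 consecutive degraded ticks suppress a family for 60 seconds, and 2 clean ticks are required to climb back to Healthy. A simplified sketch that folds the fail and recovery counters into one streak and reduces the timer to a boolean, so it diverges from the real code in corner cases (e.g. fail-streak accounting while suppressed):

const FAIL_THRESHOLD: u32 = 5; // FAMILY_SUPPRESS_FAIL_STREAK_THRESHOLD
const RECOVER_TARGET: u32 = 2; // FAMILY_RECOVER_SUCCESS_STREAK_TARGET

#[derive(Clone, Copy, PartialEq, Debug)]
enum FamilyState {
    Healthy,
    Degraded,
    Suppressed,
    Recovering,
}

fn step(state: FamilyState, streak: u32, degraded: bool, suppression_expired: bool) -> (FamilyState, u32) {
    if state == FamilyState::Suppressed && !suppression_expired {
        return (FamilyState::Suppressed, streak);
    }
    if degraded {
        // streak counts consecutive degraded ticks here.
        let fails = if state == FamilyState::Degraded { streak + 1 } else { 1 };
        if fails >= FAIL_THRESHOLD {
            (FamilyState::Suppressed, 0) // real code also sets suppressed_until = now + 60s
        } else {
            (FamilyState::Degraded, fails)
        }
    } else if state == FamilyState::Healthy {
        (FamilyState::Healthy, 0)
    } else {
        // streak counts consecutive clean ticks while recovering.
        let oks = if state == FamilyState::Recovering { streak + 1 } else { 1 };
        if oks >= RECOVER_TARGET {
            (FamilyState::Healthy, 0)
        } else {
            (FamilyState::Recovering, oks)
        }
    }
}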
fn should_emit_rate_limited_warn(
next_allowed: &mut HashMap<(i32, IpFamily), Instant>,
key: (i32, IpFamily),
@ -715,6 +858,7 @@ fn adaptive_floor_class_min(
) -> usize {
if endpoint_count <= 1 {
let min_single = (pool
.floor_runtime
.me_adaptive_floor_min_writers_single_endpoint
.load(std::sync::atomic::Ordering::Relaxed) as usize)
.max(1);
@ -971,7 +1115,7 @@ async fn maybe_swap_idle_writer_for_cap(
};
let connected = match tokio::time::timeout(
pool.me_one_timeout,
pool.reconnect_runtime.me_one_timeout,
pool.connect_one_for_dc(endpoint, dc, rng.as_ref()),
)
.await
@ -1077,7 +1221,7 @@ async fn maybe_refresh_idle_writer_for_dc(
};
let rotate_ok = match tokio::time::timeout(
pool.me_one_timeout,
pool.reconnect_runtime.me_one_timeout,
pool.connect_one_for_dc(endpoint, dc, rng.as_ref()),
)
.await
@ -1188,7 +1332,7 @@ async fn recover_single_endpoint_outage(
required: usize,
outage_backoff: &mut HashMap<(i32, IpFamily), u64>,
outage_next_attempt: &mut HashMap<(i32, IpFamily), Instant>,
reconnect_budget: &mut usize,
reconnect_sem: &Arc<Semaphore>,
) {
let now = Instant::now();
if let Some(ts) = outage_next_attempt.get(&key)
@ -1198,7 +1342,7 @@ async fn recover_single_endpoint_outage(
}
let (min_backoff_ms, max_backoff_ms) = pool.single_endpoint_outage_backoff_bounds_ms();
if *reconnect_budget == 0 {
if reconnect_sem.available_permits() == 0 {
outage_next_attempt.insert(key, now + Duration::from_millis(min_backoff_ms.max(250)));
debug!(
dc = %key.0,
@ -1209,7 +1353,17 @@ async fn recover_single_endpoint_outage(
);
return;
}
*reconnect_budget = (*reconnect_budget).saturating_sub(1);
let Ok(_reconnect_permit) = reconnect_sem.clone().try_acquire_owned() else {
outage_next_attempt.insert(key, now + Duration::from_millis(min_backoff_ms.max(250)));
debug!(
dc = %key.0,
family = ?key.1,
%endpoint,
required,
"Single-endpoint outage reconnect deferred by semaphore saturation"
);
return;
};
pool.stats
.increment_me_single_endpoint_outage_reconnect_attempt_total();
@ -1218,7 +1372,7 @@ async fn recover_single_endpoint_outage(
pool.stats
.increment_me_single_endpoint_quarantine_bypass_total();
match tokio::time::timeout(
pool.me_one_timeout,
pool.reconnect_runtime.me_one_timeout,
pool.connect_one_for_dc(endpoint, key.0, rng.as_ref()),
)
.await
@ -1247,7 +1401,7 @@ async fn recover_single_endpoint_outage(
} else {
let one_endpoint = [endpoint];
match tokio::time::timeout(
pool.me_one_timeout,
pool.reconnect_runtime.me_one_timeout,
pool.connect_endpoints_round_robin(key.0, &one_endpoint, rng.as_ref()),
)
.await
@ -1372,7 +1526,7 @@ async fn maybe_rotate_single_endpoint_shadow(
};
let rotate_ok = match tokio::time::timeout(
pool.me_one_timeout,
pool.reconnect_runtime.me_one_timeout,
pool.connect_one_for_dc(endpoint, dc, rng.as_ref()),
)
.await
@ -1687,6 +1841,8 @@ mod tests {
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
)
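
This file's larger change is the move from a mutable per-cycle counter to a Semaphore shared across DC groups, with per-DC reconnect work spawned into a JoinSet and outcomes drained afterwards. A self-contained sketch of that pattern; reconnect_one is a stand-in for the timed connect attempt, not the pool API:

use std::sync::Arc;
use tokio::sync::Semaphore;
use tokio::task::JoinSet;

async fn reconnect_one(dc: i32) -> bool {
    let _ = dc; // stand-in for a real timed connect attempt
    true
}

async fn reconnect_all(dcs: Vec<(i32, usize)>, budget: usize) {
    let sem = Arc::new(Semaphore::new(budget));
    let mut set = JoinSet::new();
    for (dc, missing) in dcs {
        let sem = sem.clone();
        set.spawn(async move {
            let mut restored = 0usize;
            for _ in 0..missing {
                // try_acquire_owned caps concurrent attempts: when every
                // budget slot is in use, stop instead of queueing.
                let Ok(_permit) = sem.clone().try_acquire_owned() else {
                    break;
                };
                if reconnect_one(dc).await {
                    restored += 1;
                }
                // _permit drops here, returning the slot to the budget.
            }
            (dc, restored)
        });
    }
    // Drain outcomes once all per-DC tasks finish, as check_family does
    // with FamilyReconnectOutcome.
    while let Some(joined) = set.join_next().await {
        if let Ok((dc, restored)) = joined {
            println!("dc {dc}: restored {restored}");
        }
    }
}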

File diff suppressed because it is too large

View File

@ -72,7 +72,7 @@ impl MePool {
}
if changed {
self.rebuild_endpoint_dc_map().await;
self.writer_available.notify_waiters();
self.notify_writer_epoch();
}
if changed {
SnapshotApplyOutcome::AppliedChanged
@ -112,7 +112,7 @@ impl MePool {
pub async fn reconnect_all(self: &Arc<Self>) {
let ws = self.writers.read().await.clone();
for w in ws {
for w in ws.iter() {
if let Ok(()) = self
.connect_one_for_dc(w.addr, w.writer_dc, self.rng.as_ref())
.await

View File

@ -14,7 +14,10 @@ use super::pool::MePool;
impl MePool {
pub async fn init(self: &Arc<Self>, pool_size: usize, rng: &Arc<SecureRandom>) -> Result<()> {
let family_order = self.family_order();
let connect_concurrency = self.me_reconnect_max_concurrent_per_dc.max(1) as usize;
let connect_concurrency = self
.reconnect_runtime
.me_reconnect_max_concurrent_per_dc
.max(1) as usize;
let ks = self.key_selector().await;
info!(
me_servers = self.proxy_map_v4.read().await.len(),
@ -250,10 +253,12 @@ impl MePool {
return false;
}
if self.me_warmup_stagger_enabled {
let jitter =
rand::rng().random_range(0..=self.me_warmup_step_jitter.as_millis() as u64);
let delay_ms = self.me_warmup_step_delay.as_millis() as u64 + jitter;
if self.reconnect_runtime.me_warmup_stagger_enabled {
let jitter = rand::rng().random_range(
0..=self.reconnect_runtime.me_warmup_step_jitter.as_millis() as u64,
);
let delay_ms =
self.reconnect_runtime.me_warmup_step_delay.as_millis() as u64 + jitter;
tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await;
}
}

View File

@ -42,10 +42,10 @@ pub async fn detect_public_ip() -> Option<IpAddr> {
impl MePool {
fn configured_stun_servers(&self) -> Vec<String> {
if !self.nat_stun_servers.is_empty() {
return self.nat_stun_servers.clone();
if !self.nat_runtime.nat_stun_servers.is_empty() {
return self.nat_runtime.nat_stun_servers.clone();
}
if let Some(s) = &self.nat_stun
if let Some(s) = &self.nat_runtime.nat_stun
&& !s.trim().is_empty()
{
return vec![s.clone()];
@ -64,7 +64,7 @@ impl MePool {
let mut next_idx = 0usize;
let mut live_servers = Vec::new();
let mut best_by_ip: HashMap<IpAddr, (usize, std::net::SocketAddr)> = HashMap::new();
let concurrency = self.nat_probe_concurrency.max(1);
let concurrency = self.nat_runtime.nat_probe_concurrency.max(1);
while next_idx < servers.len() || !join_set.is_empty() {
while next_idx < servers.len() && join_set.len() < concurrency {
@ -137,9 +137,13 @@ impl MePool {
}
pub(super) fn translate_ip_for_nat(&self, ip: IpAddr) -> IpAddr {
let nat_ip = self
.nat_ip_cfg
.or_else(|| self.nat_ip_detected.try_read().ok().and_then(|g| *g));
let nat_ip = self.nat_runtime.nat_ip_cfg.or_else(|| {
self.nat_runtime
.nat_ip_detected
.try_read()
.ok()
.and_then(|g| *g)
});
let Some(nat_ip) = nat_ip else {
return ip;
@ -163,7 +167,7 @@ impl MePool {
addr: std::net::SocketAddr,
reflected: Option<std::net::SocketAddr>,
) -> std::net::SocketAddr {
let ip = if let Some(nat_ip) = self.nat_ip_cfg {
let ip = if let Some(nat_ip) = self.nat_runtime.nat_ip_cfg {
match (addr.ip(), nat_ip) {
(IpAddr::V4(_), IpAddr::V4(dst)) => IpAddr::V4(dst),
(IpAddr::V6(_), IpAddr::V6(dst)) => IpAddr::V6(dst),
@ -185,22 +189,22 @@ impl MePool {
}
pub(super) async fn maybe_detect_nat_ip(&self, local_ip: IpAddr) -> Option<IpAddr> {
if self.nat_ip_cfg.is_some() {
return self.nat_ip_cfg;
if self.nat_runtime.nat_ip_cfg.is_some() {
return self.nat_runtime.nat_ip_cfg;
}
if !(is_bogon(local_ip) || local_ip.is_loopback() || local_ip.is_unspecified()) {
return None;
}
if let Some(ip) = *self.nat_ip_detected.read().await {
if let Some(ip) = *self.nat_runtime.nat_ip_detected.read().await {
return Some(ip);
}
match fetch_public_ipv4_with_retry().await {
Ok(Some(ip)) => {
{
let mut guard = self.nat_ip_detected.write().await;
let mut guard = self.nat_runtime.nat_ip_detected.write().await;
*guard = Some(IpAddr::V4(ip));
}
info!(public_ip = %ip, "Auto-detected public IP for NAT translation");
@ -231,10 +235,10 @@ impl MePool {
}
// Backoff window
if use_shared_cache
&& let Some(until) = *self.stun_backoff_until.read().await
&& let Some(until) = *self.nat_runtime.stun_backoff_until.read().await
&& Instant::now() < until
{
if let Ok(cache) = self.nat_reflection_cache.try_lock() {
if let Ok(cache) = self.nat_runtime.nat_reflection_cache.try_lock() {
let slot = match family {
IpFamily::V4 => cache.v4,
IpFamily::V6 => cache.v6,
@ -244,7 +248,8 @@ impl MePool {
return None;
}
if use_shared_cache && let Ok(mut cache) = self.nat_reflection_cache.try_lock() {
if use_shared_cache && let Ok(mut cache) = self.nat_runtime.nat_reflection_cache.try_lock()
{
let slot = match family {
IpFamily::V4 => &mut cache.v4,
IpFamily::V6 => &mut cache.v6,
@ -258,18 +263,18 @@ impl MePool {
let _singleflight_guard = if use_shared_cache {
Some(match family {
IpFamily::V4 => self.nat_reflection_singleflight_v4.lock().await,
IpFamily::V6 => self.nat_reflection_singleflight_v6.lock().await,
IpFamily::V4 => self.nat_runtime.nat_reflection_singleflight_v4.lock().await,
IpFamily::V6 => self.nat_runtime.nat_reflection_singleflight_v6.lock().await,
})
} else {
None
};
if use_shared_cache
&& let Some(until) = *self.stun_backoff_until.read().await
&& let Some(until) = *self.nat_runtime.stun_backoff_until.read().await
&& Instant::now() < until
{
if let Ok(cache) = self.nat_reflection_cache.try_lock() {
if let Ok(cache) = self.nat_runtime.nat_reflection_cache.try_lock() {
let slot = match family {
IpFamily::V4 => cache.v4,
IpFamily::V6 => cache.v6,
@ -279,7 +284,8 @@ impl MePool {
return None;
}
if use_shared_cache && let Ok(mut cache) = self.nat_reflection_cache.try_lock() {
if use_shared_cache && let Ok(mut cache) = self.nat_runtime.nat_reflection_cache.try_lock()
{
let slot = match family {
IpFamily::V4 => &mut cache.v4,
IpFamily::V6 => &mut cache.v6,
@ -292,13 +298,14 @@ impl MePool {
}
let attempt = if use_shared_cache {
self.nat_probe_attempts
self.nat_runtime
.nat_probe_attempts
.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
} else {
0
};
let configured_servers = self.configured_stun_servers();
let live_snapshot = self.nat_stun_live_servers.read().await.clone();
let live_snapshot = self.nat_runtime.nat_stun_live_servers.read().await.clone();
let primary_servers = if live_snapshot.is_empty() {
configured_servers.clone()
} else {
@ -322,14 +329,15 @@ impl MePool {
let live_server_count = live_servers.len();
if !live_servers.is_empty() {
*self.nat_stun_live_servers.write().await = live_servers;
*self.nat_runtime.nat_stun_live_servers.write().await = live_servers;
} else {
self.nat_stun_live_servers.write().await.clear();
self.nat_runtime.nat_stun_live_servers.write().await.clear();
}
if let Some(reflected_addr) = selected_reflected {
if use_shared_cache {
self.nat_probe_attempts
self.nat_runtime
.nat_probe_attempts
.store(0, std::sync::atomic::Ordering::Relaxed);
}
info!(
@ -338,7 +346,9 @@ impl MePool {
"STUN-Quorum reached, IP: {}",
reflected_addr.ip()
);
if use_shared_cache && let Ok(mut cache) = self.nat_reflection_cache.try_lock() {
if use_shared_cache
&& let Ok(mut cache) = self.nat_runtime.nat_reflection_cache.try_lock()
{
let slot = match family {
IpFamily::V4 => &mut cache.v4,
IpFamily::V6 => &mut cache.v6,
@ -350,7 +360,7 @@ impl MePool {
if use_shared_cache {
let backoff = Duration::from_secs(60 * 2u64.pow((attempt as u32).min(6)));
*self.stun_backoff_until.write().await = Some(Instant::now() + backoff);
*self.nat_runtime.stun_backoff_until.write().await = Some(Instant::now() + backoff);
}
None
}

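On a failed STUN quorum the hunk above arms a shared backoff window of `60 * 2^min(attempt, 6)` seconds. The schedule in isolation (pure std, nothing assumed beyond the formula in the diff):

```rust
use std::time::Duration;

/// Exponential STUN backoff: 60 s base, doubling per failed attempt, exponent
/// clamped at 6 so the window never exceeds 64 minutes.
fn stun_backoff(attempt: u32) -> Duration {
    Duration::from_secs(60 * 2u64.pow(attempt.min(6)))
}

fn main() {
    for attempt in [0u32, 1, 2, 6, 10] {
        println!("attempt {attempt}: {:?}", stun_backoff(attempt));
    }
    assert_eq!(stun_backoff(10), stun_backoff(6)); // clamped at the cap
}
```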
View File

@ -13,13 +13,40 @@ use super::pool::{MePool, RefillDcKey, RefillEndpointKey, WriterContour};
const ME_FLAP_UPTIME_THRESHOLD_SECS: u64 = 20;
const ME_FLAP_QUARANTINE_SECS: u64 = 25;
const ME_FLAP_MIN_UPTIME_MILLIS: u64 = 500;
const ME_REFILL_TOTAL_ATTEMPT_CAP: u32 = 20;
impl MePool {
pub(super) async fn sweep_endpoint_quarantine(&self) {
let configured = self
.endpoint_dc_map
.read()
.await
.keys()
.copied()
.collect::<HashSet<SocketAddr>>();
let now = Instant::now();
let mut guard = self.endpoint_quarantine.lock().await;
guard.retain(|addr, expiry| *expiry > now && configured.contains(addr));
}
pub(super) async fn maybe_quarantine_flapping_endpoint(
&self,
addr: SocketAddr,
uptime: Duration,
reason: &'static str,
) {
if uptime < Duration::from_millis(ME_FLAP_MIN_UPTIME_MILLIS) {
debug!(
%addr,
reason,
uptime_ms = uptime.as_millis(),
min_uptime_ms = ME_FLAP_MIN_UPTIME_MILLIS,
"Skipping flap quarantine for ultra-short writer lifetime"
);
return;
}
if uptime > Duration::from_secs(ME_FLAP_UPTIME_THRESHOLD_SECS) {
return;
}
@ -31,6 +58,7 @@ impl MePool {
self.stats.increment_me_endpoint_quarantine_total();
warn!(
%addr,
reason,
uptime_ms = uptime.as_millis(),
quarantine_secs = ME_FLAP_QUARANTINE_SECS,
"ME endpoint temporarily quarantined due to rapid writer flap"
@ -205,11 +233,16 @@ impl MePool {
}
async fn refill_writer_after_loss(self: &Arc<Self>, addr: SocketAddr, writer_dc: i32) -> bool {
let fast_retries = self.me_reconnect_fast_retry_count.max(1);
let fast_retries = self.reconnect_runtime.me_reconnect_fast_retry_count.max(1);
let mut total_attempts = 0u32;
let same_endpoint_quarantined = self.is_endpoint_quarantined(addr).await;
if !same_endpoint_quarantined {
for attempt in 0..fast_retries {
if total_attempts >= ME_REFILL_TOTAL_ATTEMPT_CAP {
break;
}
total_attempts = total_attempts.saturating_add(1);
self.stats.increment_me_reconnect_attempt();
match self
.connect_one_for_dc(addr, writer_dc, self.rng.as_ref())
@ -250,6 +283,10 @@ impl MePool {
}
for attempt in 0..fast_retries {
if total_attempts >= ME_REFILL_TOTAL_ATTEMPT_CAP {
break;
}
total_attempts = total_attempts.saturating_add(1);
self.stats.increment_me_reconnect_attempt();
if self
.connect_endpoints_round_robin(writer_dc, &dc_endpoints, self.rng.as_ref())

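The new `ME_REFILL_TOTAL_ATTEMPT_CAP` bounds the sum of attempts across both retry phases (same endpoint first, then round-robin over the DC's endpoints), so raising `me_reconnect_fast_retry_count` cannot multiply into an unbounded reconnect storm. A simplified sketch of the two-phase budget, with `try_connect` as a stand-in for the real connect calls:

```rust
/// Bound total refill attempts across two retry phases, as the refill hunk
/// does with ME_REFILL_TOTAL_ATTEMPT_CAP.
const TOTAL_ATTEMPT_CAP: u32 = 20;

fn refill(fast_retries: u32, mut try_connect: impl FnMut() -> bool) -> bool {
    let mut total_attempts = 0u32;
    for phase in 0..2 {
        for _ in 0..fast_retries.max(1) {
            if total_attempts >= TOTAL_ATTEMPT_CAP {
                return false; // global cap wins over the per-phase budget
            }
            total_attempts = total_attempts.saturating_add(1);
            if try_connect() {
                return true;
            }
        }
        let _ = phase; // phase 0: same endpoint, phase 1: round-robin fallback
    }
    false
}

fn main() {
    let mut calls = 0;
    let ok = refill(15, || {
        calls += 1;
        false
    });
    assert!(!ok);
    assert_eq!(calls, 20); // 15 + 15 would overshoot; the cap stops at 20
}
```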
View File

@ -37,16 +37,23 @@ impl MePool {
}
fn clear_pending_hardswap_state(&self) {
self.pending_hardswap_generation.store(0, Ordering::Relaxed);
self.pending_hardswap_started_at_epoch_secs
self.reinit
.pending_hardswap_generation
.store(0, Ordering::Relaxed);
self.pending_hardswap_map_hash.store(0, Ordering::Relaxed);
self.warm_generation.store(0, Ordering::Relaxed);
self.reinit
.pending_hardswap_started_at_epoch_secs
.store(0, Ordering::Relaxed);
self.reinit
.pending_hardswap_map_hash
.store(0, Ordering::Relaxed);
self.reinit.warm_generation.store(0, Ordering::Relaxed);
}
async fn promote_warm_generation_to_active(&self, generation: u64) {
self.active_generation.store(generation, Ordering::Relaxed);
self.warm_generation.store(0, Ordering::Relaxed);
self.reinit
.active_generation
.store(generation, Ordering::Relaxed);
self.reinit.warm_generation.store(0, Ordering::Relaxed);
let ws = self.writers.read().await;
for writer in ws.iter() {
@ -184,8 +191,14 @@ impl MePool {
}
fn hardswap_warmup_connect_delay_ms(&self) -> u64 {
let min_ms = self.me_hardswap_warmup_delay_min_ms.load(Ordering::Relaxed);
let max_ms = self.me_hardswap_warmup_delay_max_ms.load(Ordering::Relaxed);
let min_ms = self
.reinit
.me_hardswap_warmup_delay_min_ms
.load(Ordering::Relaxed);
let max_ms = self
.reinit
.me_hardswap_warmup_delay_max_ms
.load(Ordering::Relaxed);
let (min_ms, max_ms) = if min_ms <= max_ms {
(min_ms, max_ms)
} else {
@ -199,9 +212,11 @@ impl MePool {
fn hardswap_warmup_backoff_ms(&self, pass_idx: usize) -> u64 {
let base_ms = self
.reinit
.me_hardswap_warmup_pass_backoff_base_ms
.load(Ordering::Relaxed);
let cap_ms = (self.me_reconnect_backoff_cap.as_millis() as u64).max(base_ms);
let cap_ms =
(self.reconnect_runtime.me_reconnect_backoff_cap.as_millis() as u64).max(base_ms);
let shift = (pass_idx as u32).min(20);
let scaled = base_ms.saturating_mul(1u64 << shift);
let core = scaled.min(cap_ms);
@ -244,6 +259,7 @@ impl MePool {
desired_by_dc: &HashMap<i32, HashSet<SocketAddr>>,
) {
let extra_passes = self
.reinit
.me_hardswap_warmup_extra_passes
.load(Ordering::Relaxed)
.min(10) as usize;
@ -369,13 +385,20 @@ impl MePool {
let desired_map_hash = Self::desired_map_hash(&desired_by_dc);
let previous_generation = self.current_generation();
let hardswap = self.hardswap.load(Ordering::Relaxed);
let hardswap = self.reinit.hardswap.load(Ordering::Relaxed);
let generation = if hardswap {
let pending_generation = self.pending_hardswap_generation.load(Ordering::Relaxed);
let pending_generation = self
.reinit
.pending_hardswap_generation
.load(Ordering::Relaxed);
let pending_started_at = self
.reinit
.pending_hardswap_started_at_epoch_secs
.load(Ordering::Relaxed);
let pending_map_hash = self.pending_hardswap_map_hash.load(Ordering::Relaxed);
let pending_map_hash = self
.reinit
.pending_hardswap_map_hash
.load(Ordering::Relaxed);
let pending_age_secs = now_epoch_secs.saturating_sub(pending_started_at);
let pending_ttl_expired =
pending_started_at > 0 && pending_age_secs > ME_HARDSWAP_PENDING_TTL_SECS;
@ -405,24 +428,30 @@ impl MePool {
"ME hardswap pending generation expired by TTL; starting fresh generation"
);
}
let next_generation = self.generation.fetch_add(1, Ordering::Relaxed) + 1;
self.pending_hardswap_generation
let next_generation = self.reinit.generation.fetch_add(1, Ordering::Relaxed) + 1;
self.reinit
.pending_hardswap_generation
.store(next_generation, Ordering::Relaxed);
self.pending_hardswap_started_at_epoch_secs
self.reinit
.pending_hardswap_started_at_epoch_secs
.store(now_epoch_secs, Ordering::Relaxed);
self.pending_hardswap_map_hash
self.reinit
.pending_hardswap_map_hash
.store(desired_map_hash, Ordering::Relaxed);
self.warm_generation
self.reinit
.warm_generation
.store(next_generation, Ordering::Relaxed);
next_generation
}
} else {
self.clear_pending_hardswap_state();
self.generation.fetch_add(1, Ordering::Relaxed) + 1
self.reinit.generation.fetch_add(1, Ordering::Relaxed) + 1
};
if hardswap {
self.warm_generation.store(generation, Ordering::Relaxed);
self.reinit
.warm_generation
.store(generation, Ordering::Relaxed);
self.warmup_generation_for_all_dcs(rng, generation, &desired_by_dc)
.await;
} else {
@ -436,7 +465,8 @@ impl MePool {
.map(|w| (w.writer_dc, w.addr))
.collect();
let min_ratio = Self::permille_to_ratio(
self.me_pool_min_fresh_ratio_permille
self.drain_runtime
.me_pool_min_fresh_ratio_permille
.load(Ordering::Relaxed),
);
let (coverage_ratio, missing_dc) =

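`hardswap_warmup_backoff_ms` in the hunks above scales a base backoff by `2^pass_idx`, clamps the shift to avoid overflow, and caps the result by the reconnect backoff cap (which is never allowed to drop below the base). The arithmetic in isolation:

```rust
/// Per-pass hardswap warmup backoff: base * 2^pass, shift clamped, result
/// capped, mirroring the computation in the hunk above.
fn warmup_backoff_ms(base_ms: u64, cap_ms: u64, pass_idx: usize) -> u64 {
    let cap_ms = cap_ms.max(base_ms); // cap never undercuts the base
    let shift = (pass_idx as u32).min(20); // keep 1 << shift well-defined
    base_ms.saturating_mul(1u64 << shift).min(cap_ms)
}

fn main() {
    assert_eq!(warmup_backoff_ms(100, 5_000, 0), 100);
    assert_eq!(warmup_backoff_ms(100, 5_000, 3), 800);
    assert_eq!(warmup_backoff_ms(100, 5_000, 10), 5_000); // capped
}
```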
View File

@ -94,9 +94,9 @@ impl MePool {
pub(crate) async fn api_nat_stun_snapshot(&self) -> MeApiNatStunSnapshot {
let now = Instant::now();
let mut configured_servers = if !self.nat_stun_servers.is_empty() {
self.nat_stun_servers.clone()
} else if let Some(stun) = &self.nat_stun {
let mut configured_servers = if !self.nat_runtime.nat_stun_servers.is_empty() {
self.nat_runtime.nat_stun_servers.clone()
} else if let Some(stun) = &self.nat_runtime.nat_stun {
if stun.trim().is_empty() {
Vec::new()
} else {
@ -108,11 +108,11 @@ impl MePool {
configured_servers.sort();
configured_servers.dedup();
let mut live_servers = self.nat_stun_live_servers.read().await.clone();
let mut live_servers = self.nat_runtime.nat_stun_live_servers.read().await.clone();
live_servers.sort();
live_servers.dedup();
let reflection = self.nat_reflection_cache.lock().await;
let reflection = self.nat_runtime.nat_reflection_cache.lock().await;
let reflection_v4 = reflection.v4.map(|(ts, addr)| MeApiNatReflectionSnapshot {
addr,
age_secs: now.saturating_duration_since(ts).as_secs(),
@ -123,17 +123,19 @@ impl MePool {
});
drop(reflection);
let backoff_until = *self.stun_backoff_until.read().await;
let backoff_until = *self.nat_runtime.stun_backoff_until.read().await;
let stun_backoff_remaining_ms = backoff_until.and_then(|until| {
(until > now).then_some(until.duration_since(now).as_millis() as u64)
});
MeApiNatStunSnapshot {
nat_probe_enabled: self.nat_probe,
nat_probe_enabled: self.nat_runtime.nat_probe,
nat_probe_disabled_runtime: self
.nat_runtime
.nat_probe_disabled
.load(std::sync::atomic::Ordering::Relaxed),
nat_probe_attempts: self
.nat_runtime
.nat_probe_attempts
.load(std::sync::atomic::Ordering::Relaxed),
configured_servers,

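The snapshot converts the stored `stun_backoff_until` deadline into a remaining-milliseconds field, `None` once the window has passed. The same computation as a free function (safe even for a past deadline, since `Instant::duration_since` saturates to zero on modern Rust):

```rust
use std::time::{Duration, Instant};

/// Remaining-backoff field as computed in the snapshot hunk: None once the
/// deadline has passed, Some(ms) while it is still in the future.
fn backoff_remaining_ms(until: Option<Instant>, now: Instant) -> Option<u64> {
    until.and_then(|until| {
        (until > now).then_some(until.duration_since(now).as_millis() as u64)
    })
}

fn main() {
    let now = Instant::now();
    assert_eq!(backoff_remaining_ms(None, now), None);
    let later = now + Duration::from_millis(1500);
    assert_eq!(backoff_remaining_ms(Some(later), now), Some(1500));
    assert_eq!(backoff_remaining_ms(Some(now), now), None); // window over
}
```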
View File

@ -160,7 +160,7 @@ impl MePool {
let writers = self.writers.read().await.clone();
let mut live_writers_by_dc = HashMap::<i16, usize>::new();
for writer in writers {
for writer in writers.iter() {
if writer.draining.load(Ordering::Relaxed) {
continue;
}
@ -197,7 +197,7 @@ impl MePool {
let writers = self.writers.read().await.clone();
let mut live_writers_by_dc = HashMap::<i16, usize>::new();
for writer in writers {
for writer in writers.iter() {
if writer.draining.load(Ordering::Relaxed) {
continue;
}
@ -224,7 +224,10 @@ impl MePool {
pub(crate) async fn api_status_snapshot(&self) -> MeApiStatusSnapshot {
let now_epoch_secs = Self::now_epoch_secs();
let active_generation = self.current_generation();
let drain_ttl_secs = self.me_pool_drain_ttl_secs.load(Ordering::Relaxed);
let drain_ttl_secs = self
.drain_runtime
.me_pool_drain_ttl_secs
.load(Ordering::Relaxed);
let mut endpoints_by_dc = BTreeMap::<i16, BTreeSet<SocketAddr>>::new();
if self.decision.ipv4_me {
@ -255,7 +258,7 @@ impl MePool {
let mut dc_rtt_agg = HashMap::<i16, (f64, u64)>::new();
let mut writer_rows = Vec::<MeApiWriterStatusSnapshot>::with_capacity(writers.len());
for writer in writers {
for writer in writers.iter() {
let endpoint = writer.addr;
let dc = i16::try_from(writer.writer_dc).ok();
let draining = writer.draining.load(Ordering::Relaxed);
@ -336,6 +339,7 @@ impl MePool {
let mut fresh_alive_writers = 0usize;
let floor_mode = self.floor_mode();
let adaptive_cpu_cores = (self
.floor_runtime
.me_adaptive_floor_cpu_cores_effective
.load(Ordering::Relaxed) as usize)
.max(1);
@ -350,22 +354,26 @@ impl MePool {
self.required_writers_for_dc_with_floor_mode(endpoint_count, false);
let floor_min = if endpoint_count <= 1 {
(self
.floor_runtime
.me_adaptive_floor_min_writers_single_endpoint
.load(Ordering::Relaxed) as usize)
.max(1)
.min(base_required.max(1))
} else {
(self
.floor_runtime
.me_adaptive_floor_min_writers_multi_endpoint
.load(Ordering::Relaxed) as usize)
.max(1)
.min(base_required.max(1))
};
let extra_per_core = if endpoint_count <= 1 {
self.me_adaptive_floor_max_extra_writers_single_per_core
self.floor_runtime
.me_adaptive_floor_max_extra_writers_single_per_core
.load(Ordering::Relaxed) as usize
} else {
self.me_adaptive_floor_max_extra_writers_multi_per_core
self.floor_runtime
.me_adaptive_floor_max_extra_writers_multi_per_core
.load(Ordering::Relaxed) as usize
};
let floor_max =
@ -436,6 +444,7 @@ impl MePool {
let now = Instant::now();
let now_epoch_secs = Self::now_epoch_secs();
let pending_started_at = self
.reinit
.pending_hardswap_started_at_epoch_secs
.load(Ordering::Relaxed);
let pending_hardswap_age_secs =
@ -477,119 +486,175 @@ impl MePool {
}
MeApiRuntimeSnapshot {
active_generation: self.active_generation.load(Ordering::Relaxed),
warm_generation: self.warm_generation.load(Ordering::Relaxed),
pending_hardswap_generation: self.pending_hardswap_generation.load(Ordering::Relaxed),
active_generation: self.reinit.active_generation.load(Ordering::Relaxed),
warm_generation: self.reinit.warm_generation.load(Ordering::Relaxed),
pending_hardswap_generation: self
.reinit
.pending_hardswap_generation
.load(Ordering::Relaxed),
pending_hardswap_age_secs,
hardswap_enabled: self.hardswap.load(Ordering::Relaxed),
hardswap_enabled: self.reinit.hardswap.load(Ordering::Relaxed),
floor_mode: floor_mode_label(self.floor_mode()),
adaptive_floor_idle_secs: self.me_adaptive_floor_idle_secs.load(Ordering::Relaxed),
adaptive_floor_idle_secs: self
.floor_runtime
.me_adaptive_floor_idle_secs
.load(Ordering::Relaxed),
adaptive_floor_min_writers_single_endpoint: self
.floor_runtime
.me_adaptive_floor_min_writers_single_endpoint
.load(Ordering::Relaxed),
adaptive_floor_min_writers_multi_endpoint: self
.floor_runtime
.me_adaptive_floor_min_writers_multi_endpoint
.load(Ordering::Relaxed),
adaptive_floor_recover_grace_secs: self
.floor_runtime
.me_adaptive_floor_recover_grace_secs
.load(Ordering::Relaxed),
adaptive_floor_writers_per_core_total: self
.floor_runtime
.me_adaptive_floor_writers_per_core_total
.load(Ordering::Relaxed) as u16,
adaptive_floor_cpu_cores_override: self
.floor_runtime
.me_adaptive_floor_cpu_cores_override
.load(Ordering::Relaxed) as u16,
adaptive_floor_max_extra_writers_single_per_core: self
.floor_runtime
.me_adaptive_floor_max_extra_writers_single_per_core
.load(Ordering::Relaxed)
as u16,
adaptive_floor_max_extra_writers_multi_per_core: self
.floor_runtime
.me_adaptive_floor_max_extra_writers_multi_per_core
.load(Ordering::Relaxed)
as u16,
adaptive_floor_max_active_writers_per_core: self
.floor_runtime
.me_adaptive_floor_max_active_writers_per_core
.load(Ordering::Relaxed)
as u16,
adaptive_floor_max_warm_writers_per_core: self
.floor_runtime
.me_adaptive_floor_max_warm_writers_per_core
.load(Ordering::Relaxed)
as u16,
adaptive_floor_max_active_writers_global: self
.floor_runtime
.me_adaptive_floor_max_active_writers_global
.load(Ordering::Relaxed),
adaptive_floor_max_warm_writers_global: self
.floor_runtime
.me_adaptive_floor_max_warm_writers_global
.load(Ordering::Relaxed),
adaptive_floor_cpu_cores_detected: self
.floor_runtime
.me_adaptive_floor_cpu_cores_detected
.load(Ordering::Relaxed),
adaptive_floor_cpu_cores_effective: self
.floor_runtime
.me_adaptive_floor_cpu_cores_effective
.load(Ordering::Relaxed),
adaptive_floor_global_cap_raw: self
.floor_runtime
.me_adaptive_floor_global_cap_raw
.load(Ordering::Relaxed),
adaptive_floor_global_cap_effective: self
.floor_runtime
.me_adaptive_floor_global_cap_effective
.load(Ordering::Relaxed),
adaptive_floor_target_writers_total: self
.floor_runtime
.me_adaptive_floor_target_writers_total
.load(Ordering::Relaxed),
adaptive_floor_active_cap_configured: self
.floor_runtime
.me_adaptive_floor_active_cap_configured
.load(Ordering::Relaxed),
adaptive_floor_active_cap_effective: self
.floor_runtime
.me_adaptive_floor_active_cap_effective
.load(Ordering::Relaxed),
adaptive_floor_warm_cap_configured: self
.floor_runtime
.me_adaptive_floor_warm_cap_configured
.load(Ordering::Relaxed),
adaptive_floor_warm_cap_effective: self
.floor_runtime
.me_adaptive_floor_warm_cap_effective
.load(Ordering::Relaxed),
adaptive_floor_active_writers_current: self
.floor_runtime
.me_adaptive_floor_active_writers_current
.load(Ordering::Relaxed),
adaptive_floor_warm_writers_current: self
.floor_runtime
.me_adaptive_floor_warm_writers_current
.load(Ordering::Relaxed),
me_keepalive_enabled: self.me_keepalive_enabled,
me_keepalive_interval_secs: self.me_keepalive_interval.as_secs(),
me_keepalive_jitter_secs: self.me_keepalive_jitter.as_secs(),
me_keepalive_payload_random: self.me_keepalive_payload_random,
rpc_proxy_req_every_secs: self.rpc_proxy_req_every_secs.load(Ordering::Relaxed),
me_reconnect_max_concurrent_per_dc: self.me_reconnect_max_concurrent_per_dc,
me_reconnect_backoff_base_ms: self.me_reconnect_backoff_base.as_millis() as u64,
me_reconnect_backoff_cap_ms: self.me_reconnect_backoff_cap.as_millis() as u64,
me_reconnect_fast_retry_count: self.me_reconnect_fast_retry_count,
me_pool_drain_ttl_secs: self.me_pool_drain_ttl_secs.load(Ordering::Relaxed),
me_pool_force_close_secs: self.me_pool_force_close_secs.load(Ordering::Relaxed),
me_keepalive_enabled: self.writer_lifecycle.me_keepalive_enabled,
me_keepalive_interval_secs: self.writer_lifecycle.me_keepalive_interval.as_secs(),
me_keepalive_jitter_secs: self.writer_lifecycle.me_keepalive_jitter.as_secs(),
me_keepalive_payload_random: self.writer_lifecycle.me_keepalive_payload_random,
rpc_proxy_req_every_secs: self
.writer_lifecycle
.rpc_proxy_req_every_secs
.load(Ordering::Relaxed),
me_reconnect_max_concurrent_per_dc: self
.reconnect_runtime
.me_reconnect_max_concurrent_per_dc,
me_reconnect_backoff_base_ms: self
.reconnect_runtime
.me_reconnect_backoff_base
.as_millis() as u64,
me_reconnect_backoff_cap_ms: self.reconnect_runtime.me_reconnect_backoff_cap.as_millis()
as u64,
me_reconnect_fast_retry_count: self.reconnect_runtime.me_reconnect_fast_retry_count,
me_pool_drain_ttl_secs: self
.drain_runtime
.me_pool_drain_ttl_secs
.load(Ordering::Relaxed),
me_pool_force_close_secs: self
.drain_runtime
.me_pool_force_close_secs
.load(Ordering::Relaxed),
me_pool_min_fresh_ratio: Self::permille_to_ratio(
self.me_pool_min_fresh_ratio_permille
self.drain_runtime
.me_pool_min_fresh_ratio_permille
.load(Ordering::Relaxed),
),
me_bind_stale_mode: bind_stale_mode_label(self.bind_stale_mode()),
me_bind_stale_ttl_secs: self.me_bind_stale_ttl_secs.load(Ordering::Relaxed),
me_bind_stale_ttl_secs: self
.binding_policy
.me_bind_stale_ttl_secs
.load(Ordering::Relaxed),
me_single_endpoint_shadow_writers: self
.single_endpoint_runtime
.me_single_endpoint_shadow_writers
.load(Ordering::Relaxed),
me_single_endpoint_outage_mode_enabled: self
.single_endpoint_runtime
.me_single_endpoint_outage_mode_enabled
.load(Ordering::Relaxed),
me_single_endpoint_outage_disable_quarantine: self
.single_endpoint_runtime
.me_single_endpoint_outage_disable_quarantine
.load(Ordering::Relaxed),
me_single_endpoint_outage_backoff_min_ms: self
.single_endpoint_runtime
.me_single_endpoint_outage_backoff_min_ms
.load(Ordering::Relaxed),
me_single_endpoint_outage_backoff_max_ms: self
.single_endpoint_runtime
.me_single_endpoint_outage_backoff_max_ms
.load(Ordering::Relaxed),
me_single_endpoint_shadow_rotate_every_secs: self
.single_endpoint_runtime
.me_single_endpoint_shadow_rotate_every_secs
.load(Ordering::Relaxed),
me_deterministic_writer_sort: self.me_deterministic_writer_sort.load(Ordering::Relaxed),
me_deterministic_writer_sort: self
.writer_selection_policy
.me_deterministic_writer_sort
.load(Ordering::Relaxed),
me_writer_pick_mode: writer_pick_mode_label(self.writer_pick_mode()),
me_writer_pick_sample_size: self.writer_pick_sample_size() as u8,
me_socks_kdf_policy: socks_kdf_policy_label(self.socks_kdf_policy()),

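Most of the churn in this snapshot hunk is mechanical: flat `MePool` fields become grouped runtime structs (`floor_runtime`, `reconnect_runtime`, `drain_runtime`, and so on). A toy sketch of the decomposition pattern with hypothetical field subsets, showing that call sites only gain one path segment while the knobs stay lock-free atomics:

```rust
use std::sync::atomic::{AtomicU32, AtomicU64, Ordering};

/// Illustrative decomposition: related hot-reloadable knobs live together in
/// one sub-struct instead of as flat fields on the pool.
struct FloorRuntime {
    me_adaptive_floor_idle_secs: AtomicU64,
    me_adaptive_floor_cpu_cores_effective: AtomicU32,
}

struct DrainRuntime {
    me_pool_drain_ttl_secs: AtomicU64,
}

struct Pool {
    floor_runtime: FloorRuntime,
    drain_runtime: DrainRuntime,
}

impl Pool {
    fn snapshot(&self) -> (u64, u32, u64) {
        (
            self.floor_runtime.me_adaptive_floor_idle_secs.load(Ordering::Relaxed),
            self.floor_runtime
                .me_adaptive_floor_cpu_cores_effective
                .load(Ordering::Relaxed),
            self.drain_runtime.me_pool_drain_ttl_secs.load(Ordering::Relaxed),
        )
    }
}

fn main() {
    let pool = Pool {
        floor_runtime: FloorRuntime {
            me_adaptive_floor_idle_secs: AtomicU64::new(30),
            me_adaptive_floor_cpu_cores_effective: AtomicU32::new(4),
        },
        drain_runtime: DrainRuntime {
            me_pool_drain_ttl_secs: AtomicU64::new(120),
        },
    };
    assert_eq!(pool.snapshot(), (30, 4, 120));
}
```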
View File

@ -1,3 +1,4 @@
use std::collections::HashMap;
use std::io::ErrorKind;
use std::net::SocketAddr;
use std::sync::Arc;
@ -25,6 +26,7 @@ const ME_ACTIVE_PING_SECS: u64 = 25;
const ME_ACTIVE_PING_JITTER_SECS: i64 = 5;
const ME_IDLE_KEEPALIVE_MAX_SECS: u64 = 5;
const ME_RPC_PROXY_REQ_RESPONSE_WAIT_MS: u64 = 700;
const ME_PING_TRACKER_CLEANUP_EVERY: u32 = 32;
#[derive(Clone, Copy)]
enum WriterTeardownMode {
@ -36,6 +38,240 @@ fn is_me_peer_closed_error(error: &ProxyError) -> bool {
matches!(error, ProxyError::Io(ioe) if ioe.kind() == ErrorKind::UnexpectedEof)
}
enum WriterLifecycleExit {
Reader(Result<()>),
Writer(Result<()>),
Ping,
Signal,
Cancelled,
}
async fn writer_command_loop(
mut rx: mpsc::Receiver<WriterCommand>,
mut rpc_writer: RpcWriter,
cancel: CancellationToken,
) -> Result<()> {
loop {
tokio::select! {
cmd = rx.recv() => {
match cmd {
Some(WriterCommand::Data(payload)) => {
rpc_writer.send(&payload).await?;
}
Some(WriterCommand::DataAndFlush(payload)) => {
rpc_writer.send_and_flush(&payload).await?;
}
Some(WriterCommand::Close) | None => return Ok(()),
}
}
_ = cancel.cancelled() => return Ok(()),
}
}
}
#[allow(clippy::too_many_arguments)]
async fn ping_loop(
pool_ping: std::sync::Weak<MePool>,
writer_id: u64,
tx_ping: mpsc::Sender<WriterCommand>,
ping_tracker_ping: Arc<tokio::sync::Mutex<HashMap<i64, Instant>>>,
stats_ping: Arc<crate::stats::Stats>,
keepalive_enabled: bool,
keepalive_interval: Duration,
keepalive_jitter: Duration,
cancel_ping_token: CancellationToken,
) {
let mut ping_id: i64 = rand::random::<i64>();
let mut cleanup_tick: u32 = 0;
let idle_interval_cap = Duration::from_secs(ME_IDLE_KEEPALIVE_MAX_SECS);
// Per-writer jittered start to avoid phase sync.
let startup_jitter = if keepalive_enabled {
let mut interval = keepalive_interval;
let Some(pool) = pool_ping.upgrade() else {
return;
};
if pool.registry.is_writer_empty(writer_id).await {
interval = interval.min(idle_interval_cap);
}
let jitter_cap_ms = interval.as_millis() / 2;
let effective_jitter_ms = keepalive_jitter.as_millis().min(jitter_cap_ms).max(1);
Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64))
} else {
let jitter =
rand::rng().random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS);
let wait = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64;
Duration::from_secs(wait)
};
tokio::select! {
_ = cancel_ping_token.cancelled() => return,
_ = tokio::time::sleep(startup_jitter) => {}
}
loop {
let wait = if keepalive_enabled {
let mut interval = keepalive_interval;
let Some(pool) = pool_ping.upgrade() else {
return;
};
if pool.registry.is_writer_empty(writer_id).await {
interval = interval.min(idle_interval_cap);
}
let jitter_cap_ms = interval.as_millis() / 2;
let effective_jitter_ms = keepalive_jitter.as_millis().min(jitter_cap_ms).max(1);
interval
+ Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64))
} else {
let jitter =
rand::rng().random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS);
let secs = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64;
Duration::from_secs(secs)
};
tokio::select! {
_ = cancel_ping_token.cancelled() => return,
_ = tokio::time::sleep(wait) => {}
}
let sent_id = ping_id;
let mut p = Vec::with_capacity(12);
p.extend_from_slice(&RPC_PING_U32.to_le_bytes());
p.extend_from_slice(&sent_id.to_le_bytes());
{
let mut tracker = ping_tracker_ping.lock().await;
cleanup_tick = cleanup_tick.wrapping_add(1);
if cleanup_tick.is_multiple_of(ME_PING_TRACKER_CLEANUP_EVERY) {
let before = tracker.len();
tracker.retain(|_, ts| ts.elapsed() < Duration::from_secs(120));
let expired = before.saturating_sub(tracker.len());
if expired > 0 {
stats_ping.increment_me_keepalive_timeout_by(expired as u64);
}
}
tracker.insert(sent_id, std::time::Instant::now());
}
ping_id = ping_id.wrapping_add(1);
stats_ping.increment_me_keepalive_sent();
if tx_ping
.send(WriterCommand::DataAndFlush(Bytes::from(p)))
.await
.is_err()
{
stats_ping.increment_me_keepalive_failed();
debug!("ME ping failed, removing dead writer");
return;
}
}
}
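The rewritten `ping_loop` above amortizes tracker hygiene: only every `ME_PING_TRACKER_CLEANUP_EVERY`-th send pays for a `retain()` sweep, and entries older than 120 s count as keepalive timeouts. A self-contained sketch of that cadence (the real code uses `u32::is_multiple_of` on a wrapping tick; `%` is equivalent here):

```rust
use std::collections::HashMap;
use std::time::{Duration, Instant};

const CLEANUP_EVERY: u32 = 32;
const PING_TTL: Duration = Duration::from_secs(120);

/// Amortized tracker sweep: only every Nth send pays the retain() cost;
/// returns how many in-flight pings expired unanswered.
fn maybe_cleanup(tracker: &mut HashMap<i64, Instant>, tick: &mut u32, now: Instant) -> usize {
    *tick = tick.wrapping_add(1);
    if *tick % CLEANUP_EVERY != 0 {
        return 0;
    }
    let before = tracker.len();
    tracker.retain(|_, sent| now.duration_since(*sent) < PING_TTL);
    before.saturating_sub(tracker.len())
}

fn main() {
    let mut tracker = HashMap::new();
    let mut tick = 0u32;
    let start = Instant::now();
    tracker.insert(1, start);
    // Pretend the TTL has elapsed by advancing "now" instead of sleeping.
    let now = start + PING_TTL;
    let mut expired = 0;
    for _ in 0..CLEANUP_EVERY {
        expired += maybe_cleanup(&mut tracker, &mut tick, now);
    }
    assert_eq!(expired, 1); // the sweep ran once and evicted the stale ping
}
```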
#[allow(clippy::too_many_arguments)]
async fn rpc_proxy_req_signal_loop(
pool_signal: std::sync::Weak<MePool>,
writer_id: u64,
tx_signal: mpsc::Sender<WriterCommand>,
stats_signal: Arc<crate::stats::Stats>,
cancel_signal: CancellationToken,
keepalive_jitter_signal: Duration,
rpc_proxy_req_every_secs: u64,
) {
if rpc_proxy_req_every_secs == 0 {
// A disabled service-signal loop must stay parked until writer cancellation:
// returning immediately here would complete the `select!` and tear down the
// writer lifecycle.
cancel_signal.cancelled().await;
return;
}
let interval = Duration::from_secs(rpc_proxy_req_every_secs);
let startup_jitter_ms = {
let jitter_cap_ms = interval.as_millis() / 2;
let effective_jitter_ms = keepalive_jitter_signal
.as_millis()
.min(jitter_cap_ms)
.max(1);
rand::rng().random_range(0..=effective_jitter_ms as u64)
};
tokio::select! {
_ = cancel_signal.cancelled() => return,
_ = tokio::time::sleep(Duration::from_millis(startup_jitter_ms)) => {}
}
loop {
let wait = {
let jitter_cap_ms = interval.as_millis() / 2;
let effective_jitter_ms = keepalive_jitter_signal
.as_millis()
.min(jitter_cap_ms)
.max(1);
interval
+ Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64))
};
tokio::select! {
_ = cancel_signal.cancelled() => return,
_ = tokio::time::sleep(wait) => {}
}
let Some(pool) = pool_signal.upgrade() else {
return;
};
let Some(meta) = pool.registry.get_last_writer_meta(writer_id).await else {
stats_signal.increment_me_rpc_proxy_req_signal_skipped_no_meta_total();
continue;
};
let (conn_id, mut service_rx) = pool.registry.register().await;
// Service RPC_PROXY_REQ signal path is intentionally route-only:
// do not bind synthetic conn_id into regular writer/client accounting.
let payload = build_proxy_req_payload(
conn_id,
meta.client_addr,
meta.our_addr,
&[],
pool.proxy_tag.as_deref(),
meta.proto_flags,
);
if tx_signal
.send(WriterCommand::DataAndFlush(payload))
.await
.is_err()
{
stats_signal.increment_me_rpc_proxy_req_signal_failed_total();
let _ = pool.registry.unregister(conn_id).await;
return;
}
stats_signal.increment_me_rpc_proxy_req_signal_sent_total();
if matches!(
tokio::time::timeout(
Duration::from_millis(ME_RPC_PROXY_REQ_RESPONSE_WAIT_MS),
service_rx.recv(),
)
.await,
Ok(Some(_))
) {
stats_signal.increment_me_rpc_proxy_req_signal_response_total();
}
let mut close_payload = Vec::with_capacity(12);
close_payload.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
close_payload.extend_from_slice(&conn_id.to_le_bytes());
if tx_signal
.send(WriterCommand::DataAndFlush(Bytes::from(close_payload)))
.await
.is_err()
{
stats_signal.increment_me_rpc_proxy_req_signal_failed_total();
let _ = pool.registry.unregister(conn_id).await;
return;
}
stats_signal.increment_me_rpc_proxy_req_signal_close_sent_total();
let _ = pool.registry.unregister(conn_id).await;
}
}
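The `rpc_proxy_req_every_secs == 0` branch above is easy to get wrong: in the consolidated lifecycle, a disabled loop must park on the cancellation token rather than return, or the surrounding `select!` would treat it as a lifecycle exit. A minimal demonstration of the parking pattern, assuming the `tokio` and `tokio-util` crates already used by the diff:

```rust
use tokio::time::{sleep, timeout, Duration};
use tokio_util::sync::CancellationToken;

/// A disabled branch parks on the cancellation token instead of returning,
/// so the surrounding select! only completes on real work or cancellation.
async fn signal_loop(every_secs: u64, cancel: CancellationToken) {
    if every_secs == 0 {
        cancel.cancelled().await; // parked for as long as the writer lives
        return;
    }
    loop {
        tokio::select! {
            _ = cancel.cancelled() => return,
            _ = sleep(Duration::from_secs(every_secs)) => { /* send signal */ }
        }
    }
}

#[tokio::main]
async fn main() {
    let cancel = CancellationToken::new();
    // Disabled loop does not complete on its own...
    let parked = timeout(Duration::from_millis(50), signal_loop(0, cancel.clone())).await;
    assert!(parked.is_err());
    // ...but exits promptly once the writer is cancelled.
    cancel.cancel();
    signal_loop(0, cancel).await;
}
```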
impl MePool {
pub(crate) async fn prune_closed_writers(self: &Arc<Self>) {
let closed_writer_ids: Vec<u64> = {
@ -136,46 +372,15 @@ impl MePool {
let draining_started_at_epoch_secs = Arc::new(AtomicU64::new(0));
let drain_deadline_epoch_secs = Arc::new(AtomicU64::new(0));
let allow_drain_fallback = Arc::new(AtomicBool::new(false));
let (tx, mut rx) = mpsc::channel::<WriterCommand>(self.writer_cmd_channel_capacity);
let mut rpc_writer = RpcWriter {
let (tx, rx) =
mpsc::channel::<WriterCommand>(self.writer_lifecycle.writer_cmd_channel_capacity);
let rpc_writer = RpcWriter {
writer: hs.wr,
key: hs.write_key,
iv: hs.write_iv,
seq_no: 0,
crc_mode: hs.crc_mode,
};
let cancel_wr = cancel.clone();
let cleanup_done = Arc::new(AtomicBool::new(false));
let cleanup_for_writer = cleanup_done.clone();
let pool_writer_task = Arc::downgrade(self);
tokio::spawn(async move {
loop {
tokio::select! {
cmd = rx.recv() => {
match cmd {
Some(WriterCommand::Data(payload)) => {
if rpc_writer.send(&payload).await.is_err() { break; }
}
Some(WriterCommand::DataAndFlush(payload)) => {
if rpc_writer.send_and_flush(&payload).await.is_err() { break; }
}
Some(WriterCommand::Close) | None => break,
}
}
_ = cancel_wr.cancelled() => break,
}
}
if cleanup_for_writer
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
if let Some(pool) = pool_writer_task.upgrade() {
pool.remove_writer_and_close_clients(writer_id).await;
} else {
cancel_wr.cancel();
}
}
});
let writer = MeWriter {
id: writer_id,
addr,
@ -193,329 +398,135 @@ impl MePool {
drain_deadline_epoch_secs: drain_deadline_epoch_secs.clone(),
allow_drain_fallback: allow_drain_fallback.clone(),
};
self.writers.write().await.push(writer.clone());
self.writers
.update(|writers| writers.push(writer.clone()))
.await;
self.registry.register_writer(writer_id, tx.clone()).await;
self.registry.mark_writer_idle(writer_id).await;
self.conn_count.fetch_add(1, Ordering::Relaxed);
self.writer_available.notify_one();
self.notify_writer_epoch();
let reg = self.registry.clone();
let writers_arc = self.writers_arc();
let ping_tracker = self.ping_tracker.clone();
let ping_tracker = Arc::new(tokio::sync::Mutex::new(HashMap::<i64, Instant>::new()));
let ping_tracker_reader = ping_tracker.clone();
let ping_tracker_ping = ping_tracker.clone();
let rtt_stats = self.rtt_stats.clone();
let stats_reader = self.stats.clone();
let stats_reader_close = self.stats.clone();
let stats_ping = self.stats.clone();
let pool = Arc::downgrade(self);
let cancel_ping = cancel.clone();
let tx_ping = tx.clone();
let ping_tracker_ping = ping_tracker.clone();
let cleanup_for_reader = cleanup_done.clone();
let cleanup_for_ping = cleanup_done.clone();
let keepalive_enabled = self.me_keepalive_enabled;
let keepalive_interval = self.me_keepalive_interval;
let keepalive_jitter = self.me_keepalive_jitter;
let rpc_proxy_req_every_secs = self.rpc_proxy_req_every_secs.load(Ordering::Relaxed);
let tx_signal = tx.clone();
let stats_signal = self.stats.clone();
let cancel_signal = cancel.clone();
let cleanup_for_signal = cleanup_done.clone();
let pool_signal = Arc::downgrade(self);
let keepalive_jitter_signal = self.me_keepalive_jitter;
let cancel_reader_token = cancel.clone();
let cancel_ping_token = cancel_ping.clone();
let reader_route_data_wait_ms = self.me_reader_route_data_wait_ms.clone();
tokio::spawn(async move {
let res = reader_loop(
hs.rd,
hs.read_key,
hs.read_iv,
hs.crc_mode,
reg.clone(),
BytesMut::new(),
BytesMut::new(),
tx.clone(),
ping_tracker_reader,
rtt_stats.clone(),
stats_reader,
writer_id,
degraded.clone(),
rtt_ema_ms_x10.clone(),
reader_route_data_wait_ms,
cancel_reader_token.clone(),
)
.await;
let idle_close_by_peer = if let Err(e) = res.as_ref() {
is_me_peer_closed_error(e) && reg.is_writer_empty(writer_id).await
} else {
false
};
if idle_close_by_peer {
stats_reader_close.increment_me_idle_close_by_peer_total();
info!(writer_id, "ME socket closed by peer on idle writer");
}
if cleanup_for_reader
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
if let Some(pool) = pool.upgrade() {
pool.remove_writer_and_close_clients(writer_id).await;
} else {
// Fallback for shutdown races: make writer task exit quickly so stale
// channels are observable by periodic prune.
cancel_reader_token.cancel();
}
}
if let Err(e) = res
&& !idle_close_by_peer
{
warn!(error = %e, "ME reader ended");
}
let remaining = writers_arc.read().await.len();
debug!(writer_id, remaining, "ME reader task finished");
});
let pool_lifecycle = Arc::downgrade(self);
let pool_ping = Arc::downgrade(self);
let pool_signal = Arc::downgrade(self);
let tx_reader = tx.clone();
let tx_ping = tx.clone();
let tx_signal = tx.clone();
let keepalive_enabled = self.writer_lifecycle.me_keepalive_enabled;
let keepalive_interval = self.writer_lifecycle.me_keepalive_interval;
let keepalive_jitter = self.writer_lifecycle.me_keepalive_jitter;
let keepalive_jitter_signal = self.writer_lifecycle.me_keepalive_jitter;
let rpc_proxy_req_every_secs = self
.writer_lifecycle
.rpc_proxy_req_every_secs
.load(Ordering::Relaxed);
let cancel_reader = cancel.clone();
let cancel_writer = cancel.clone();
let cancel_ping = cancel.clone();
let cancel_signal = cancel.clone();
let cancel_select = cancel.clone();
let cancel_cleanup = cancel.clone();
let reader_route_data_wait_ms = self.transport_policy.me_reader_route_data_wait_ms.clone();
tokio::spawn(async move {
let mut ping_id: i64 = rand::random::<i64>();
let idle_interval_cap = Duration::from_secs(ME_IDLE_KEEPALIVE_MAX_SECS);
// Per-writer jittered start to avoid phase sync.
let startup_jitter = if keepalive_enabled {
let mut interval = keepalive_interval;
if let Some(pool) = pool_ping.upgrade() {
if pool.registry.is_writer_empty(writer_id).await {
interval = interval.min(idle_interval_cap);
}
} else {
return;
// Reader MUST be the first branch in biased select! to avoid read starvation.
let exit = tokio::select! {
biased;
reader_res = reader_loop(
hs.rd,
hs.read_key,
hs.read_iv,
hs.crc_mode,
reg.clone(),
BytesMut::new(),
BytesMut::new(),
tx_reader,
ping_tracker_reader,
rtt_stats,
stats_reader,
writer_id,
degraded,
rtt_ema_ms_x10,
reader_route_data_wait_ms,
cancel_reader,
) => WriterLifecycleExit::Reader(reader_res),
writer_res = writer_command_loop(rx, rpc_writer, cancel_writer) => {
WriterLifecycleExit::Writer(writer_res)
}
let jitter_cap_ms = interval.as_millis() / 2;
let effective_jitter_ms = keepalive_jitter.as_millis().min(jitter_cap_ms).max(1);
Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64))
} else {
let jitter = rand::rng()
.random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS);
let wait = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64;
Duration::from_secs(wait)
_ = ping_loop(
pool_ping,
writer_id,
tx_ping,
ping_tracker_ping,
stats_ping,
keepalive_enabled,
keepalive_interval,
keepalive_jitter,
cancel_ping,
) => WriterLifecycleExit::Ping,
_ = rpc_proxy_req_signal_loop(
pool_signal,
writer_id,
tx_signal,
stats_signal,
cancel_signal,
keepalive_jitter_signal,
rpc_proxy_req_every_secs,
) => WriterLifecycleExit::Signal,
_ = cancel_select.cancelled() => WriterLifecycleExit::Cancelled,
};
tokio::select! {
_ = cancel_ping_token.cancelled() => return,
_ = tokio::time::sleep(startup_jitter) => {}
}
loop {
let wait = if keepalive_enabled {
let mut interval = keepalive_interval;
if let Some(pool) = pool_ping.upgrade() {
if pool.registry.is_writer_empty(writer_id).await {
interval = interval.min(idle_interval_cap);
}
match exit {
WriterLifecycleExit::Reader(res) => {
let idle_close_by_peer = if let Err(e) = res.as_ref() {
is_me_peer_closed_error(e) && reg.is_writer_empty(writer_id).await
} else {
break;
false
};
if idle_close_by_peer {
stats_reader_close.increment_me_idle_close_by_peer_total();
info!(writer_id, "ME socket closed by peer on idle writer");
}
let jitter_cap_ms = interval.as_millis() / 2;
let effective_jitter_ms =
keepalive_jitter.as_millis().min(jitter_cap_ms).max(1);
interval
+ Duration::from_millis(
rand::rng().random_range(0..=effective_jitter_ms as u64),
)
} else {
let jitter = rand::rng()
.random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS);
let secs = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64;
Duration::from_secs(secs)
};
tokio::select! {
_ = cancel_ping_token.cancelled() => {
break;
}
_ = tokio::time::sleep(wait) => {}
}
let sent_id = ping_id;
let mut p = Vec::with_capacity(12);
p.extend_from_slice(&RPC_PING_U32.to_le_bytes());
p.extend_from_slice(&sent_id.to_le_bytes());
{
let mut tracker = ping_tracker_ping.lock().await;
let now_epoch_ms = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_millis() as u64;
let mut run_cleanup = false;
if let Some(pool) = pool_ping.upgrade() {
let last_cleanup_ms = pool
.ping_tracker_last_cleanup_epoch_ms
.load(Ordering::Relaxed);
if now_epoch_ms.saturating_sub(last_cleanup_ms) >= 30_000
&& pool
.ping_tracker_last_cleanup_epoch_ms
.compare_exchange(
last_cleanup_ms,
now_epoch_ms,
Ordering::AcqRel,
Ordering::Relaxed,
)
.is_ok()
{
run_cleanup = true;
}
}
if run_cleanup {
let before = tracker.len();
tracker.retain(|_, (ts, _)| ts.elapsed() < Duration::from_secs(120));
let expired = before.saturating_sub(tracker.len());
if expired > 0 {
stats_ping.increment_me_keepalive_timeout_by(expired as u64);
}
}
tracker.insert(sent_id, (std::time::Instant::now(), writer_id));
}
ping_id = ping_id.wrapping_add(1);
stats_ping.increment_me_keepalive_sent();
if tx_ping
.send(WriterCommand::DataAndFlush(Bytes::from(p)))
.await
.is_err()
{
stats_ping.increment_me_keepalive_failed();
debug!("ME ping failed, removing dead writer");
cancel_ping.cancel();
if cleanup_for_ping
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
&& let Some(pool) = pool_ping.upgrade()
if let Err(e) = res
&& !idle_close_by_peer
{
pool.remove_writer_and_close_clients(writer_id).await;
warn!(error = %e, "ME reader ended");
}
break;
}
}
});
tokio::spawn(async move {
if rpc_proxy_req_every_secs == 0 {
return;
}
let interval = Duration::from_secs(rpc_proxy_req_every_secs);
let startup_jitter_ms = {
let jitter_cap_ms = interval.as_millis() / 2;
let effective_jitter_ms = keepalive_jitter_signal
.as_millis()
.min(jitter_cap_ms)
.max(1);
rand::rng().random_range(0..=effective_jitter_ms as u64)
};
tokio::select! {
_ = cancel_signal.cancelled() => return,
_ = tokio::time::sleep(Duration::from_millis(startup_jitter_ms)) => {}
}
loop {
let wait = {
let jitter_cap_ms = interval.as_millis() / 2;
let effective_jitter_ms = keepalive_jitter_signal
.as_millis()
.min(jitter_cap_ms)
.max(1);
interval
+ Duration::from_millis(
rand::rng().random_range(0..=effective_jitter_ms as u64),
)
};
tokio::select! {
_ = cancel_signal.cancelled() => break,
_ = tokio::time::sleep(wait) => {}
}
let Some(pool) = pool_signal.upgrade() else {
break;
};
let Some(meta) = pool.registry.get_last_writer_meta(writer_id).await else {
stats_signal.increment_me_rpc_proxy_req_signal_skipped_no_meta_total();
continue;
};
let (conn_id, mut service_rx) = pool.registry.register().await;
if !pool
.registry
.bind_writer(conn_id, writer_id, meta.clone())
.await
{
let _ = pool.registry.unregister(conn_id).await;
stats_signal.increment_me_rpc_proxy_req_signal_skipped_no_meta_total();
continue;
}
let payload = build_proxy_req_payload(
conn_id,
meta.client_addr,
meta.our_addr,
&[],
pool.proxy_tag.as_deref(),
meta.proto_flags,
);
if tx_signal
.send(WriterCommand::DataAndFlush(payload))
.await
.is_err()
{
stats_signal.increment_me_rpc_proxy_req_signal_failed_total();
let _ = pool.registry.unregister(conn_id).await;
cancel_signal.cancel();
if cleanup_for_signal
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
pool.remove_writer_and_close_clients(writer_id).await;
WriterLifecycleExit::Writer(res) => {
if let Err(e) = res {
warn!(error = %e, "ME writer command loop ended");
}
break;
}
stats_signal.increment_me_rpc_proxy_req_signal_sent_total();
if matches!(
tokio::time::timeout(
Duration::from_millis(ME_RPC_PROXY_REQ_RESPONSE_WAIT_MS),
service_rx.recv(),
)
.await,
Ok(Some(_))
) {
stats_signal.increment_me_rpc_proxy_req_signal_response_total();
WriterLifecycleExit::Ping => {
debug!(writer_id, "ME ping loop finished");
}
let mut close_payload = Vec::with_capacity(12);
close_payload.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
close_payload.extend_from_slice(&conn_id.to_le_bytes());
if tx_signal
.send(WriterCommand::DataAndFlush(Bytes::from(close_payload)))
.await
.is_err()
{
stats_signal.increment_me_rpc_proxy_req_signal_failed_total();
let _ = pool.registry.unregister(conn_id).await;
cancel_signal.cancel();
if cleanup_for_signal
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
pool.remove_writer_and_close_clients(writer_id).await;
}
break;
WriterLifecycleExit::Signal => {
debug!(writer_id, "ME rpc_proxy_req signal loop finished");
}
stats_signal.increment_me_rpc_proxy_req_signal_close_sent_total();
let _ = pool.registry.unregister(conn_id).await;
WriterLifecycleExit::Cancelled => {}
}
if let Some(pool) = pool_lifecycle.upgrade() {
pool.remove_writer_and_close_clients(writer_id).await;
} else {
// Fallback for shutdown races: make lifecycle exit observable by prune.
cancel_cleanup.cancel();
}
let remaining = writers_arc.read().await.len();
debug!(writer_id, remaining, "ME writer lifecycle task finished");
});
Ok(())
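Net effect of this file: three independently spawned tasks (writer command loop, ping loop, signal loop), each racing a `compare_exchange` cleanup flag, collapse into one lifecycle task with a biased `select!` and an exit enum, leaving a single cleanup path. A reduced sketch of that shape, with stand-in loop bodies:

```rust
use tokio::time::{sleep, Duration};
use tokio_util::sync::CancellationToken;

enum Exit {
    Reader,
    Ping,
    Cancelled,
}

/// Consolidated lifecycle: one task, one biased select, one cleanup path,
/// instead of three spawned tasks racing to run cleanup via a CAS flag.
async fn lifecycle(cancel: CancellationToken) -> Exit {
    let exit = tokio::select! {
        biased; // poll the reader branch first so reads are never starved
        _ = reader_loop() => Exit::Reader,
        _ = ping_loop() => Exit::Ping,
        _ = cancel.cancelled() => Exit::Cancelled,
    };
    // Runs exactly once, whichever branch finished first.
    cleanup().await;
    exit
}

async fn reader_loop() {
    sleep(Duration::from_millis(10)).await // stand-in: finishes first
}

async fn ping_loop() {
    sleep(Duration::from_secs(3600)).await // stand-in: effectively parked
}

async fn cleanup() {}

#[tokio::main]
async fn main() {
    assert!(matches!(lifecycle(CancellationToken::new()).await, Exit::Reader));
}
```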
@ -594,23 +605,36 @@ impl MePool {
// The close command below is only a best-effort accelerator for task shutdown.
// Cleanup progress must never depend on command-channel availability.
let _ = self.registry.writer_lost(writer_id).await;
{
let mut tracker = self.ping_tracker.lock().await;
tracker.retain(|_, (_, wid)| *wid != writer_id);
}
self.rtt_stats.lock().await.remove(&writer_id);
if let Some(tx) = close_tx {
let _ = tx.send(WriterCommand::Close).await;
// Keep teardown critical path non-blocking: close is best-effort only.
let _ = tx.try_send(WriterCommand::Close);
}
if let Some(addr) = removed_addr {
if let Some(uptime) = removed_uptime {
// Quarantine flapping endpoints regardless of draining state.
self.maybe_quarantine_flapping_endpoint(addr, uptime).await;
// Quarantine contract: only unexpected removals are considered endpoint flap.
if trigger_refill {
self.stats
.increment_me_endpoint_quarantine_unexpected_total();
self.maybe_quarantine_flapping_endpoint(addr, uptime, "unexpected")
.await;
} else {
self.stats
.increment_me_endpoint_quarantine_draining_suppressed_total();
debug!(
%addr,
uptime_ms = uptime.as_millis(),
"Skipping endpoint quarantine for draining writer removal"
);
}
}
if trigger_refill && let Some(writer_dc) = removed_dc {
self.trigger_immediate_refill_for_dc(addr, writer_dc);
}
}
if removed {
self.notify_writer_epoch();
}
removed
}
@ -676,7 +700,10 @@ impl MePool {
MeBindStaleMode::Never => false,
MeBindStaleMode::Always => true,
MeBindStaleMode::Ttl => {
let ttl_secs = self.me_bind_stale_ttl_secs.load(Ordering::Relaxed);
let ttl_secs = self
.binding_policy
.me_bind_stale_ttl_secs
.load(Ordering::Relaxed);
if ttl_secs == 0 {
return true;
}

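The TTL branch above treats `ttl_secs == 0` as "always stale". A compact sketch of the full decision, assuming (it is not shown in this hunk) that the TTL mode otherwise compares binding age against the TTL:

```rust
enum BindStaleMode {
    Never,
    Always,
    Ttl,
}

/// Stale-binding decision: TTL of zero means "always stale"; otherwise the
/// binding's age is compared against the TTL (assumed behavior).
fn is_stale(mode: BindStaleMode, ttl_secs: u64, age_secs: u64) -> bool {
    match mode {
        BindStaleMode::Never => false,
        BindStaleMode::Always => true,
        BindStaleMode::Ttl => ttl_secs == 0 || age_secs > ttl_secs,
    }
}

fn main() {
    assert!(!is_stale(BindStaleMode::Never, 0, 999));
    assert!(is_stale(BindStaleMode::Always, 999, 0));
    assert!(is_stale(BindStaleMode::Ttl, 0, 0)); // zero TTL: always stale
    assert!(is_stale(BindStaleMode::Ttl, 30, 31));
    assert!(!is_stale(BindStaleMode::Ttl, 30, 30));
}
```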
View File

@ -32,10 +32,10 @@ pub(crate) async fn reader_loop(
enc_leftover: BytesMut,
mut dec: BytesMut,
tx: mpsc::Sender<WriterCommand>,
ping_tracker: Arc<Mutex<HashMap<i64, (Instant, u64)>>>,
ping_tracker: Arc<Mutex<HashMap<i64, Instant>>>,
rtt_stats: Arc<Mutex<HashMap<u64, (f64, f64)>>>,
stats: Arc<Stats>,
_writer_id: u64,
writer_id: u64,
degraded: Arc<AtomicBool>,
writer_rtt_ema_ms_x10: Arc<AtomicU32>,
reader_route_data_wait_ms: Arc<AtomicU64>,
@ -45,7 +45,7 @@ pub(crate) async fn reader_loop(
let mut expected_seq: i32 = 0;
loop {
let mut tmp = [0u8; 16_384];
let mut tmp = [0u8; 65_536];
let n = tokio::select! {
res = rd.read(&mut tmp) => res.map_err(ProxyError::Io)?,
_ = cancel.cancelled() => return Ok(()),
@ -203,13 +203,13 @@ pub(crate) async fn reader_loop(
} else if pt == RPC_PONG_U32 && body.len() >= 8 {
let ping_id = i64::from_le_bytes(body[0..8].try_into().unwrap());
stats.increment_me_keepalive_pong();
if let Some((sent, wid)) = {
if let Some(sent) = {
let mut guard = ping_tracker.lock().await;
guard.remove(&ping_id)
} {
let rtt = sent.elapsed().as_secs_f64() * 1000.0;
let mut stats = rtt_stats.lock().await;
let entry = stats.entry(wid).or_insert((rtt, rtt));
let entry = stats.entry(writer_id).or_insert((rtt, rtt));
entry.1 = entry.1 * 0.8 + rtt * 0.2;
if rtt < entry.0 {
entry.0 = rtt;
@ -224,7 +224,7 @@ pub(crate) async fn reader_loop(
Ordering::Relaxed,
);
trace!(
writer_id = wid,
writer_id,
rtt_ms = rtt,
ema_ms = entry.1,
base_ms = entry.0,

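With the per-writer tracker, the pong path no longer needs a writer id per ping entry: `reader_loop` already knows its own `writer_id`. Its RTT bookkeeping keeps a per-writer `(best, ema)` pair, smoothing with a 0.8/0.2 EMA and publishing the EMA as tenths of a millisecond through an `AtomicU32` so pickers can read it lock-free. The update step in isolation:

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Per-writer RTT tracking as in the pong hunk: keep (best, ema) in ms and
/// publish the EMA as tenths of a millisecond for lock-free readers.
fn record_rtt(entry: &mut (f64, f64), published: &AtomicU32, rtt_ms: f64) {
    entry.1 = entry.1 * 0.8 + rtt_ms * 0.2; // smoothed RTT
    if rtt_ms < entry.0 {
        entry.0 = rtt_ms; // best-observed RTT floor
    }
    published.store((entry.1 * 10.0) as u32, Ordering::Relaxed);
}

fn main() {
    let published = AtomicU32::new(0);
    let mut entry = (50.0, 50.0); // seeded from the first sample
    record_rtt(&mut entry, &published, 30.0);
    assert_eq!(published.load(Ordering::Relaxed), 460); // 50*0.8 + 30*0.2 = 46.0
    assert_eq!(entry.0, 30.0);
}
```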
View File

@ -3,8 +3,9 @@ use std::net::SocketAddr;
use std::sync::atomic::{AtomicU8, AtomicU64, Ordering};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use dashmap::DashMap;
use tokio::sync::mpsc::error::TrySendError;
use tokio::sync::{RwLock, mpsc};
use tokio::sync::{Mutex, mpsc};
use super::MeResponse;
use super::codec::WriterCommand;
@ -50,8 +51,15 @@ pub(super) struct WriterActivitySnapshot {
pub active_sessions_by_target_dc: HashMap<i16, usize>,
}
struct RegistryInner {
map: HashMap<u64, mpsc::Sender<MeResponse>>,
struct RoutingTable {
map: DashMap<u64, mpsc::Sender<MeResponse>>,
}
struct BindingState {
inner: Mutex<BindingInner>,
}
struct BindingInner {
writers: HashMap<u64, mpsc::Sender<WriterCommand>>,
writer_for_conn: HashMap<u64, u64>,
conns_for_writer: HashMap<u64, HashSet<u64>>,
@ -60,10 +68,9 @@ struct RegistryInner {
writer_idle_since_epoch_secs: HashMap<u64, u64>,
}
impl RegistryInner {
impl BindingInner {
fn new() -> Self {
Self {
map: HashMap::new(),
writers: HashMap::new(),
writer_for_conn: HashMap::new(),
conns_for_writer: HashMap::new(),
@ -75,7 +82,8 @@ impl RegistryInner {
}
pub struct ConnRegistry {
inner: RwLock<RegistryInner>,
routing: RoutingTable,
binding: BindingState,
next_id: AtomicU64,
route_channel_capacity: usize,
route_backpressure_base_timeout_ms: AtomicU64,
@ -94,7 +102,12 @@ impl ConnRegistry {
pub fn with_route_channel_capacity(route_channel_capacity: usize) -> Self {
let start = rand::random::<u64>() | 1;
Self {
inner: RwLock::new(RegistryInner::new()),
routing: RoutingTable {
map: DashMap::new(),
},
binding: BindingState {
inner: Mutex::new(BindingInner::new()),
},
next_id: AtomicU64::new(start),
route_channel_capacity: route_channel_capacity.max(1),
route_backpressure_base_timeout_ms: AtomicU64::new(ROUTE_BACKPRESSURE_BASE_TIMEOUT_MS),
@ -130,14 +143,14 @@ impl ConnRegistry {
pub async fn register(&self) -> (u64, mpsc::Receiver<MeResponse>) {
let id = self.next_id.fetch_add(1, Ordering::Relaxed);
let (tx, rx) = mpsc::channel(self.route_channel_capacity);
self.inner.write().await.map.insert(id, tx);
self.routing.map.insert(id, tx);
(id, rx)
}
pub async fn register_writer(&self, writer_id: u64, tx: mpsc::Sender<WriterCommand>) {
let mut inner = self.inner.write().await;
inner.writers.insert(writer_id, tx);
inner
let mut binding = self.binding.inner.lock().await;
binding.writers.insert(writer_id, tx);
binding
.conns_for_writer
.entry(writer_id)
.or_insert_with(HashSet::new);
@ -145,18 +158,18 @@ impl ConnRegistry {
/// Unregister connection, returning associated writer_id if any.
pub async fn unregister(&self, id: u64) -> Option<u64> {
let mut inner = self.inner.write().await;
inner.map.remove(&id);
inner.meta.remove(&id);
if let Some(writer_id) = inner.writer_for_conn.remove(&id) {
let became_empty = if let Some(set) = inner.conns_for_writer.get_mut(&writer_id) {
self.routing.map.remove(&id);
let mut binding = self.binding.inner.lock().await;
binding.meta.remove(&id);
if let Some(writer_id) = binding.writer_for_conn.remove(&id) {
let became_empty = if let Some(set) = binding.conns_for_writer.get_mut(&writer_id) {
set.remove(&id);
set.is_empty()
} else {
false
};
if became_empty {
inner
binding
.writer_idle_since_epoch_secs
.insert(writer_id, Self::now_epoch_secs());
}
@ -167,10 +180,7 @@ impl ConnRegistry {
#[allow(dead_code)]
pub async fn route(&self, id: u64, resp: MeResponse) -> RouteResult {
let tx = {
let inner = self.inner.read().await;
inner.map.get(&id).cloned()
};
let tx = self.routing.map.get(&id).map(|entry| entry.value().clone());
let Some(tx) = tx else {
return RouteResult::NoConn;
@ -223,10 +233,7 @@ impl ConnRegistry {
}
pub async fn route_nowait(&self, id: u64, resp: MeResponse) -> RouteResult {
let tx = {
let inner = self.inner.read().await;
inner.map.get(&id).cloned()
};
let tx = self.routing.map.get(&id).map(|entry| entry.value().clone());
let Some(tx) = tx else {
return RouteResult::NoConn;
@ -249,10 +256,7 @@ impl ConnRegistry {
return self.route_nowait(id, resp).await;
}
let tx = {
let inner = self.inner.read().await;
inner.map.get(&id).cloned()
};
let tx = self.routing.map.get(&id).map(|entry| entry.value().clone());
let Some(tx) = tx else {
return RouteResult::NoConn;
@ -291,33 +295,39 @@ impl ConnRegistry {
}
pub async fn bind_writer(&self, conn_id: u64, writer_id: u64, meta: ConnMeta) -> bool {
let mut inner = self.inner.write().await;
if !inner.writers.contains_key(&writer_id) {
let mut binding = self.binding.inner.lock().await;
// ROUTING IS THE SOURCE OF TRUTH:
// never keep/attach writer binding for a connection that is already
// absent from the routing table.
if !self.routing.map.contains_key(&conn_id) {
return false;
}
if !binding.writers.contains_key(&writer_id) {
return false;
}
let previous_writer_id = inner.writer_for_conn.insert(conn_id, writer_id);
let previous_writer_id = binding.writer_for_conn.insert(conn_id, writer_id);
if let Some(previous_writer_id) = previous_writer_id
&& previous_writer_id != writer_id
{
let became_empty =
if let Some(set) = inner.conns_for_writer.get_mut(&previous_writer_id) {
if let Some(set) = binding.conns_for_writer.get_mut(&previous_writer_id) {
set.remove(&conn_id);
set.is_empty()
} else {
false
};
if became_empty {
inner
binding
.writer_idle_since_epoch_secs
.insert(previous_writer_id, Self::now_epoch_secs());
}
}
inner.meta.insert(conn_id, meta.clone());
inner.last_meta_for_writer.insert(writer_id, meta);
inner.writer_idle_since_epoch_secs.remove(&writer_id);
inner
binding.meta.insert(conn_id, meta.clone());
binding.last_meta_for_writer.insert(writer_id, meta);
binding.writer_idle_since_epoch_secs.remove(&writer_id);
binding
.conns_for_writer
.entry(writer_id)
.or_insert_with(HashSet::new)
@ -326,32 +336,32 @@ impl ConnRegistry {
}
pub async fn mark_writer_idle(&self, writer_id: u64) {
let mut inner = self.inner.write().await;
inner
let mut binding = self.binding.inner.lock().await;
binding
.conns_for_writer
.entry(writer_id)
.or_insert_with(HashSet::new);
inner
binding
.writer_idle_since_epoch_secs
.entry(writer_id)
.or_insert(Self::now_epoch_secs());
}
pub async fn get_last_writer_meta(&self, writer_id: u64) -> Option<ConnMeta> {
let inner = self.inner.read().await;
inner.last_meta_for_writer.get(&writer_id).cloned()
let binding = self.binding.inner.lock().await;
binding.last_meta_for_writer.get(&writer_id).cloned()
}
pub async fn writer_idle_since_snapshot(&self) -> HashMap<u64, u64> {
let inner = self.inner.read().await;
inner.writer_idle_since_epoch_secs.clone()
let binding = self.binding.inner.lock().await;
binding.writer_idle_since_epoch_secs.clone()
}
pub async fn writer_idle_since_for_writer_ids(&self, writer_ids: &[u64]) -> HashMap<u64, u64> {
let inner = self.inner.read().await;
let binding = self.binding.inner.lock().await;
let mut out = HashMap::<u64, u64>::with_capacity(writer_ids.len());
for writer_id in writer_ids {
if let Some(idle_since) = inner.writer_idle_since_epoch_secs.get(writer_id).copied() {
if let Some(idle_since) = binding.writer_idle_since_epoch_secs.get(writer_id).copied() {
out.insert(*writer_id, idle_since);
}
}
@ -359,14 +369,14 @@ impl ConnRegistry {
}
pub(super) async fn writer_activity_snapshot(&self) -> WriterActivitySnapshot {
let inner = self.inner.read().await;
let binding = self.binding.inner.lock().await;
let mut bound_clients_by_writer = HashMap::<u64, usize>::new();
let mut active_sessions_by_target_dc = HashMap::<i16, usize>::new();
for (writer_id, conn_ids) in &inner.conns_for_writer {
for (writer_id, conn_ids) in &binding.conns_for_writer {
bound_clients_by_writer.insert(*writer_id, conn_ids.len());
}
for conn_meta in inner.meta.values() {
for conn_meta in binding.meta.values() {
if conn_meta.target_dc == 0 {
continue;
}
@ -382,9 +392,39 @@ impl ConnRegistry {
}
pub async fn get_writer(&self, conn_id: u64) -> Option<ConnWriter> {
let inner = self.inner.read().await;
let writer_id = inner.writer_for_conn.get(&conn_id).cloned()?;
let writer = inner.writers.get(&writer_id).cloned()?;
let mut binding = self.binding.inner.lock().await;
// ROUTING IS THE SOURCE OF TRUTH:
// stale bindings are ignored and lazily cleaned when routing no longer
// contains the connection.
if !self.routing.map.contains_key(&conn_id) {
binding.meta.remove(&conn_id);
if let Some(stale_writer_id) = binding.writer_for_conn.remove(&conn_id)
&& let Some(conns) = binding.conns_for_writer.get_mut(&stale_writer_id)
{
conns.remove(&conn_id);
if conns.is_empty() {
binding
.writer_idle_since_epoch_secs
.insert(stale_writer_id, Self::now_epoch_secs());
}
}
return None;
}
let writer_id = binding.writer_for_conn.get(&conn_id).copied()?;
let Some(writer) = binding.writers.get(&writer_id).cloned() else {
binding.writer_for_conn.remove(&conn_id);
binding.meta.remove(&conn_id);
if let Some(conns) = binding.conns_for_writer.get_mut(&writer_id) {
conns.remove(&conn_id);
if conns.is_empty() {
binding
.writer_idle_since_epoch_secs
.insert(writer_id, Self::now_epoch_secs());
}
}
return None;
};
Some(ConnWriter {
writer_id,
tx: writer,
@ -392,16 +432,16 @@ impl ConnRegistry {
}
pub async fn active_conn_ids(&self) -> Vec<u64> {
let inner = self.inner.read().await;
inner.writer_for_conn.keys().copied().collect()
let binding = self.binding.inner.lock().await;
binding.writer_for_conn.keys().copied().collect()
}
pub async fn writer_lost(&self, writer_id: u64) -> Vec<BoundConn> {
let mut inner = self.inner.write().await;
inner.writers.remove(&writer_id);
inner.last_meta_for_writer.remove(&writer_id);
inner.writer_idle_since_epoch_secs.remove(&writer_id);
let conns = inner
let mut binding = self.binding.inner.lock().await;
binding.writers.remove(&writer_id);
binding.last_meta_for_writer.remove(&writer_id);
binding.writer_idle_since_epoch_secs.remove(&writer_id);
let conns = binding
.conns_for_writer
.remove(&writer_id)
.unwrap_or_default()
@ -410,11 +450,11 @@ impl ConnRegistry {
let mut out = Vec::new();
for conn_id in conns {
if inner.writer_for_conn.get(&conn_id).copied() != Some(writer_id) {
if binding.writer_for_conn.get(&conn_id).copied() != Some(writer_id) {
continue;
}
inner.writer_for_conn.remove(&conn_id);
if let Some(m) = inner.meta.get(&conn_id) {
binding.writer_for_conn.remove(&conn_id);
if let Some(m) = binding.meta.get(&conn_id) {
out.push(BoundConn {
conn_id,
meta: m.clone(),
@ -426,13 +466,13 @@ impl ConnRegistry {
#[allow(dead_code)]
pub async fn get_meta(&self, conn_id: u64) -> Option<ConnMeta> {
let inner = self.inner.read().await;
inner.meta.get(&conn_id).cloned()
let binding = self.binding.inner.lock().await;
binding.meta.get(&conn_id).cloned()
}
pub async fn is_writer_empty(&self, writer_id: u64) -> bool {
let inner = self.inner.read().await;
inner
let binding = self.binding.inner.lock().await;
binding
.conns_for_writer
.get(&writer_id)
.map(|s| s.is_empty())
@ -441,8 +481,8 @@ impl ConnRegistry {
#[allow(dead_code)]
pub async fn unregister_writer_if_empty(&self, writer_id: u64) -> bool {
let mut inner = self.inner.write().await;
let Some(conn_ids) = inner.conns_for_writer.get(&writer_id) else {
let mut binding = self.binding.inner.lock().await;
let Some(conn_ids) = binding.conns_for_writer.get(&writer_id) else {
// Writer is already absent from the registry.
return true;
};
@ -450,19 +490,19 @@ impl ConnRegistry {
return false;
}
inner.writers.remove(&writer_id);
inner.last_meta_for_writer.remove(&writer_id);
inner.writer_idle_since_epoch_secs.remove(&writer_id);
inner.conns_for_writer.remove(&writer_id);
binding.writers.remove(&writer_id);
binding.last_meta_for_writer.remove(&writer_id);
binding.writer_idle_since_epoch_secs.remove(&writer_id);
binding.conns_for_writer.remove(&writer_id);
true
}
#[allow(dead_code)]
pub(super) async fn non_empty_writer_ids(&self, writer_ids: &[u64]) -> HashSet<u64> {
let inner = self.inner.read().await;
let binding = self.binding.inner.lock().await;
let mut out = HashSet::<u64>::with_capacity(writer_ids.len());
for writer_id in writer_ids {
if let Some(conns) = inner.conns_for_writer.get(writer_id)
if let Some(conns) = binding.conns_for_writer.get(writer_id)
&& !conns.is_empty()
{
out.insert(*writer_id);

View File

@ -26,6 +26,9 @@ use rand::seq::SliceRandom;
const IDLE_WRITER_PENALTY_MID_SECS: u64 = 45;
const IDLE_WRITER_PENALTY_HIGH_SECS: u64 = 55;
const HYBRID_GLOBAL_BURST_PERIOD_ROUNDS: u32 = 4;
const HYBRID_RECENT_SUCCESS_WINDOW_MS: u64 = 120_000;
const HYBRID_TIMEOUT_WARN_RATE_LIMIT_MS: u64 = 5_000;
const HYBRID_RECOVERY_TRIGGER_MIN_INTERVAL_MS: u64 = 5_000;
const PICK_PENALTY_WARM: u64 = 200;
const PICK_PENALTY_DRAINING: u64 = 600;
const PICK_PENALTY_STALE: u64 = 300;
@ -68,8 +71,11 @@ impl MePool {
},
)
};
let no_writer_mode =
MeRouteNoWriterMode::from_u8(self.me_route_no_writer_mode.load(Ordering::Relaxed));
let no_writer_mode = MeRouteNoWriterMode::from_u8(
self.route_runtime
.me_route_no_writer_mode
.load(Ordering::Relaxed),
);
let (routed_dc, unknown_target_dc) =
self.resolve_target_dc_for_routing(target_dc as i32).await;
let mut no_writer_deadline: Option<Instant> = None;
@ -77,7 +83,11 @@ impl MePool {
let mut async_recovery_triggered = false;
let mut hybrid_recovery_round = 0u32;
let mut hybrid_last_recovery_at: Option<Instant> = None;
let hybrid_wait_step = self.me_route_no_writer_wait.max(Duration::from_millis(50));
let mut hybrid_total_deadline: Option<Instant> = None;
let hybrid_wait_step = self
.route_runtime
.me_route_no_writer_wait
.max(Duration::from_millis(50));
let mut hybrid_wait_current = hybrid_wait_step;
loop {
@ -92,9 +102,13 @@ impl MePool {
.tx
.try_send(WriterCommand::Data(current_payload.clone()))
{
Ok(()) => return Ok(()),
Ok(()) => {
self.note_hybrid_route_success();
return Ok(());
}
Err(TrySendError::Full(cmd)) => {
if current.tx.send(cmd).await.is_ok() {
self.note_hybrid_route_success();
return Ok(());
}
warn!(writer_id = current.writer_id, "ME writer channel closed");
@ -118,7 +132,7 @@ impl MePool {
match no_writer_mode {
MeRouteNoWriterMode::AsyncRecoveryFailfast => {
let deadline = *no_writer_deadline.get_or_insert_with(|| {
Instant::now() + self.me_route_no_writer_wait
Instant::now() + self.route_runtime.me_route_no_writer_wait
});
if !async_recovery_triggered && !unknown_target_dc {
let triggered =
@ -139,7 +153,9 @@ impl MePool {
MeRouteNoWriterMode::InlineRecoveryLegacy => {
self.stats.increment_me_inline_recovery_total();
if !unknown_target_dc {
for _ in 0..self.me_route_inline_recovery_attempts.max(1) {
for _ in
0..self.route_runtime.me_route_inline_recovery_attempts.max(1)
{
for family in self.family_order() {
let map = match family {
IpFamily::V4 => self.proxy_map_v4.read().await.clone(),
@ -168,7 +184,7 @@ impl MePool {
continue;
}
let deadline = *no_writer_deadline.get_or_insert_with(|| {
Instant::now() + self.me_route_inline_recovery_wait
Instant::now() + self.route_runtime.me_route_inline_recovery_wait
});
if !self.wait_for_writer_until(deadline).await {
if !self.writers.read().await.is_empty() {
@ -182,6 +198,15 @@ impl MePool {
continue;
}
MeRouteNoWriterMode::HybridAsyncPersistent => {
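// The total deadline is computed once per route call and shared across
// retries; crossing it turns the persistent wait into a hard error,
// reported through the rate-limited warn path.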
let total_deadline = *hybrid_total_deadline.get_or_insert_with(|| {
Instant::now() + self.hybrid_total_wait_budget()
});
if Instant::now() >= total_deadline {
self.on_hybrid_timeout(total_deadline, routed_dc);
return Err(ProxyError::Proxy(
"ME writer not available within hybrid timeout".into(),
));
}
if !unknown_target_dc {
self.maybe_trigger_hybrid_recovery(
routed_dc,
@ -214,8 +239,9 @@ impl MePool {
let pick_mode = self.writer_pick_mode();
match no_writer_mode {
MeRouteNoWriterMode::AsyncRecoveryFailfast => {
let deadline = *no_writer_deadline
.get_or_insert_with(|| Instant::now() + self.me_route_no_writer_wait);
let deadline = *no_writer_deadline.get_or_insert_with(|| {
Instant::now() + self.route_runtime.me_route_no_writer_wait
});
if !async_recovery_triggered && !unknown_target_dc {
let triggered =
self.trigger_async_recovery_for_target_dc(routed_dc).await;
@ -238,7 +264,7 @@ impl MePool {
self.stats.increment_me_inline_recovery_total();
if unknown_target_dc {
let deadline = *no_writer_deadline.get_or_insert_with(|| {
Instant::now() + self.me_route_inline_recovery_wait
Instant::now() + self.route_runtime.me_route_inline_recovery_wait
});
if self.wait_for_candidate_until(routed_dc, deadline).await {
continue;
@ -250,7 +276,9 @@ impl MePool {
"No ME writers available for target DC".into(),
));
}
if emergency_attempts >= self.me_route_inline_recovery_attempts.max(1) {
if emergency_attempts
>= self.route_runtime.me_route_inline_recovery_attempts.max(1)
{
self.stats
.increment_me_writer_pick_no_candidate_total(pick_mode);
self.stats.increment_me_no_writer_failfast_total();
@ -292,6 +320,16 @@ impl MePool {
}
}
MeRouteNoWriterMode::HybridAsyncPersistent => {
let total_deadline = *hybrid_total_deadline.get_or_insert_with(|| {
Instant::now() + self.hybrid_total_wait_budget()
});
if Instant::now() >= total_deadline {
self.on_hybrid_timeout(total_deadline, routed_dc);
return Err(ProxyError::Proxy(
"No ME writers available for target DC within hybrid timeout"
.into(),
));
}
if !unknown_target_dc {
self.maybe_trigger_hybrid_recovery(
routed_dc,
@ -332,7 +370,11 @@ impl MePool {
pick_sample_size,
)
} else {
if self.me_deterministic_writer_sort.load(Ordering::Relaxed) {
if self
.writer_selection_policy
.me_deterministic_writer_sort
.load(Ordering::Relaxed)
{
candidate_indices.sort_by(|lhs, rhs| {
let left = &writers_snapshot[*lhs];
let right = &writers_snapshot[*rhs];
@ -423,6 +465,7 @@ impl MePool {
"Selected stale ME writer for fallback bind"
);
}
self.note_hybrid_route_success();
return Ok(());
}
Err(TrySendError::Full(_)) => {
@ -453,7 +496,19 @@ impl MePool {
.increment_me_writer_pick_blocking_fallback_total();
let effective_our_addr = SocketAddr::new(w.source_ip, our_addr.port());
let (payload, meta) = build_routed_payload(effective_our_addr);
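// Bounded blocking fallback: without a configured timeout, reserve_owned()
// can park indefinitely on a full writer queue; with one, the timeout is
// counted as a full pick and the loop moves to the next candidate.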
match w.tx.clone().reserve_owned().await {
let reserve_result =
if let Some(timeout) = self.route_runtime.me_route_blocking_send_timeout {
match tokio::time::timeout(timeout, w.tx.clone().reserve_owned()).await {
Ok(result) => result,
Err(_) => {
self.stats.increment_me_writer_pick_full_total(pick_mode);
continue;
}
}
} else {
w.tx.clone().reserve_owned().await
};
match reserve_result {
Ok(permit) => {
if !self.registry.bind_writer(conn_id, w.id, meta).await {
debug!(
@ -471,6 +526,7 @@ impl MePool {
if w.generation < self.current_generation() {
self.stats.increment_pool_stale_pick_total();
}
self.note_hybrid_route_success();
return Ok(());
}
Err(_) => {
@ -483,7 +539,7 @@ impl MePool {
}
async fn wait_for_writer_until(&self, deadline: Instant) -> bool {
let waiter = self.writer_available.notified();
let mut rx = self.writer_epoch.subscribe();
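// Subscribing before the emptiness check means an epoch bump that lands
// between the check and the awaited changed() cannot be lost.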
if !self.writers.read().await.is_empty() {
return true;
}
@ -492,13 +548,14 @@ impl MePool {
return !self.writers.read().await.is_empty();
}
let timeout = deadline.saturating_duration_since(now);
if tokio::time::timeout(timeout, waiter).await.is_ok() {
return true;
if tokio::time::timeout(timeout, rx.changed()).await.is_ok() {
return !self.writers.read().await.is_empty();
}
!self.writers.read().await.is_empty()
}
async fn wait_for_candidate_until(&self, routed_dc: i32, deadline: Instant) -> bool {
let mut rx = self.writer_epoch.subscribe();
loop {
if self.has_candidate_for_target_dc(routed_dc).await {
return true;
@ -509,7 +566,6 @@ impl MePool {
return self.has_candidate_for_target_dc(routed_dc).await;
}
let waiter = self.writer_available.notified();
if self.has_candidate_for_target_dc(routed_dc).await {
return true;
}
@ -517,7 +573,7 @@ impl MePool {
if remaining.is_zero() {
return self.has_candidate_for_target_dc(routed_dc).await;
}
if tokio::time::timeout(remaining, waiter).await.is_err() {
if tokio::time::timeout(remaining, rx.changed()).await.is_err() {
return self.has_candidate_for_target_dc(routed_dc).await;
}
}
@ -587,6 +643,9 @@ impl MePool {
hybrid_last_recovery_at: &mut Option<Instant>,
hybrid_wait_step: Duration,
) {
if !self.try_consume_hybrid_recovery_trigger_slot(HYBRID_RECOVERY_TRIGGER_MIN_INTERVAL_MS) {
return;
}
if let Some(last) = *hybrid_last_recovery_at
&& last.elapsed() < hybrid_wait_step
{
@ -602,6 +661,78 @@ impl MePool {
*hybrid_last_recovery_at = Some(Instant::now());
}
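/// Total wait budget for hybrid routing: the configured max wait, doubled
/// when a route succeeded within HYBRID_RECENT_SUCCESS_WINDOW_MS (the pool
/// is likely degraded rather than down).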
fn hybrid_total_wait_budget(&self) -> Duration {
let base = self
.route_runtime
.me_route_hybrid_max_wait
.max(Duration::from_millis(50));
let now_ms = Self::now_epoch_millis();
let last_success_ms = self
.route_runtime
.me_route_last_success_epoch_ms
.load(Ordering::Relaxed);
if last_success_ms != 0
&& now_ms.saturating_sub(last_success_ms) <= HYBRID_RECENT_SUCCESS_WINDOW_MS
{
return base.saturating_mul(2);
}
base
}
fn note_hybrid_route_success(&self) {
self.route_runtime
.me_route_last_success_epoch_ms
.store(Self::now_epoch_millis(), Ordering::Relaxed);
}
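/// Rate-limited timeout reporting: a CAS loop over an epoch-millis cell
/// elects at most one warner per HYBRID_TIMEOUT_WARN_RATE_LIMIT_MS window.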
fn on_hybrid_timeout(&self, deadline: Instant, routed_dc: i32) {
self.stats.increment_me_hybrid_timeout_total();
let now_ms = Self::now_epoch_millis();
let mut last_warn_ms = self
.route_runtime
.me_route_hybrid_timeout_warn_epoch_ms
.load(Ordering::Relaxed);
while now_ms.saturating_sub(last_warn_ms) >= HYBRID_TIMEOUT_WARN_RATE_LIMIT_MS {
match self
.route_runtime
.me_route_hybrid_timeout_warn_epoch_ms
.compare_exchange_weak(last_warn_ms, now_ms, Ordering::AcqRel, Ordering::Relaxed)
{
Ok(_) => {
warn!(
routed_dc,
budget_ms = self.hybrid_total_wait_budget().as_millis() as u64,
elapsed_ms = deadline.elapsed().as_millis() as u64,
"ME hybrid route timeout reached"
);
break;
}
Err(actual) => last_warn_ms = actual,
}
}
}
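/// Lock-free trigger slot: the first caller in each `min_interval_ms` window
/// wins the compare-exchange and may start recovery; concurrent callers
/// retry against the observed timestamp and bail once inside the window.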
fn try_consume_hybrid_recovery_trigger_slot(&self, min_interval_ms: u64) -> bool {
let now_ms = Self::now_epoch_millis();
let mut last_trigger_ms = self
.route_runtime
.me_async_recovery_last_trigger_epoch_ms
.load(Ordering::Relaxed);
loop {
if now_ms.saturating_sub(last_trigger_ms) < min_interval_ms {
return false;
}
match self
.route_runtime
.me_async_recovery_last_trigger_epoch_ms
.compare_exchange_weak(last_trigger_ms, now_ms, Ordering::AcqRel, Ordering::Relaxed)
{
Ok(_) => return true,
Err(actual) => last_trigger_ms = actual,
}
}
}
pub async fn send_close(self: &Arc<Self>, conn_id: u64) -> Result<()> {
if let Some(w) = self.registry.get_writer(conn_id).await {
let mut p = Vec::with_capacity(12);
@ -749,7 +880,7 @@ impl MePool {
(self.writer_idle_rank_for_selection(writer, idle_since_by_writer, now_epoch_secs)
as u64)
* 100;
let queue_cap = self.writer_cmd_channel_capacity.max(1) as u64;
let queue_cap = self.writer_lifecycle.writer_cmd_channel_capacity.max(1) as u64;
let queue_remaining = writer.tx.capacity() as u64;
let queue_used = queue_cap.saturating_sub(queue_remaining.min(queue_cap));
let queue_util_pct = queue_used.saturating_mul(100) / queue_cap;
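The timeout-warn and recovery-trigger CAS loops above share one lock-free pattern. A minimal, self-contained sketch of it under illustrative names (`RateLimiter` is not the crate's API):

use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{SystemTime, UNIX_EPOCH};

struct RateLimiter {
    last_ms: AtomicU64,
    min_interval_ms: u64,
}

impl RateLimiter {
    fn now_ms() -> u64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|d| d.as_millis() as u64)
            .unwrap_or(0)
    }

    /// Returns true for at most one caller per interval; lock-free.
    fn try_acquire(&self) -> bool {
        let now = Self::now_ms();
        let mut last = self.last_ms.load(Ordering::Relaxed);
        loop {
            if now.saturating_sub(last) < self.min_interval_ms {
                return false; // another caller won this window
            }
            match self.last_ms.compare_exchange_weak(
                last,
                now,
                Ordering::AcqRel,
                Ordering::Relaxed,
            ) {
                Ok(_) => return true,          // we own the slot
                Err(actual) => last = actual,  // retry against the observed value
            }
        }
    }
}

fn main() {
    let rl = RateLimiter { last_ms: AtomicU64::new(0), min_interval_ms: 5_000 };
    assert!(rl.try_acquire());
    assert!(!rl.try_acquire()); // same window: suppressed
}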

View File

@ -113,6 +113,8 @@ async fn make_pool(
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
);

View File

@ -111,6 +111,8 @@ async fn make_pool(
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
);

View File

@ -106,6 +106,8 @@ async fn make_pool(me_pool_drain_threshold: u64) -> Arc<MePool> {
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
)

View File

@ -95,6 +95,8 @@ async fn make_pool() -> Arc<MePool> {
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
)

View File

@ -35,7 +35,7 @@ async fn make_pool() -> Arc<MePool> {
NetworkDecision::default(),
None,
Arc::new(SecureRandom::new()),
Arc::new(Stats::default()),
Arc::new(Stats::new()),
general.me_keepalive_enabled,
general.me_keepalive_interval_secs,
general.me_keepalive_jitter_secs,
@ -100,6 +100,8 @@ async fn make_pool() -> Arc<MePool> {
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
)
@ -171,10 +173,15 @@ async fn bind_conn_to_writer(pool: &Arc<MePool>, writer_id: u64, port: u16) -> u
}
#[tokio::test]
async fn remove_draining_writer_still_quarantines_flapping_endpoint() {
async fn remove_draining_writer_does_not_quarantine_flapping_endpoint() {
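// Unexpected-only quarantine: removing a writer already marked as draining
// is an expected shutdown and must only bump the suppressed counter, never
// the quarantine set.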
let pool = make_pool().await;
let writer_id = 77;
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 12, 0, 77)), 443);
let before_total = pool.stats.get_me_endpoint_quarantine_total();
let before_unexpected = pool.stats.get_me_endpoint_quarantine_unexpected_total();
let before_suppressed = pool
.stats
.get_me_endpoint_quarantine_draining_suppressed_total();
insert_writer(
&pool,
writer_id,
@ -198,8 +205,18 @@ async fn remove_draining_writer_still_quarantines_flapping_endpoint() {
"writer must be removed from pool after cleanup"
);
assert!(
pool.is_endpoint_quarantined(addr).await,
"draining removals must still quarantine flapping endpoints"
!pool.is_endpoint_quarantined(addr).await,
"draining removals must not quarantine endpoint"
);
assert_eq!(pool.stats.get_me_endpoint_quarantine_total(), before_total);
assert_eq!(
pool.stats.get_me_endpoint_quarantine_unexpected_total(),
before_unexpected
);
assert_eq!(
pool.stats
.get_me_endpoint_quarantine_draining_suppressed_total(),
before_suppressed + 1
);
assert_eq!(pool.conn_count.load(Ordering::Relaxed), 0);
}
@ -255,16 +272,21 @@ async fn edge_draining_only_detach_rejects_active_writer() {
}
#[tokio::test]
async fn adversarial_blackhat_single_remove_establishes_single_quarantine_entry() {
async fn adversarial_blackhat_single_unexpected_remove_establishes_single_quarantine_entry() {
let pool = make_pool().await;
let writer_id = 93;
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 12, 0, 93)), 443);
let before_total = pool.stats.get_me_endpoint_quarantine_total();
let before_unexpected = pool.stats.get_me_endpoint_quarantine_unexpected_total();
let before_suppressed = pool
.stats
.get_me_endpoint_quarantine_draining_suppressed_total();
insert_writer(
&pool,
writer_id,
2,
addr,
true,
false,
Instant::now() - Duration::from_secs(1),
)
.await;
@ -272,6 +294,49 @@ async fn adversarial_blackhat_single_remove_establishes_single_quarantine_entry(
pool.remove_writer_and_close_clients(writer_id).await;
assert!(pool.is_endpoint_quarantined(addr).await);
assert_eq!(pool.endpoint_quarantine.lock().await.len(), 1);
assert_eq!(
pool.stats.get_me_endpoint_quarantine_total(),
before_total + 1
);
assert_eq!(
pool.stats.get_me_endpoint_quarantine_unexpected_total(),
before_unexpected + 1
);
assert_eq!(
pool.stats
.get_me_endpoint_quarantine_draining_suppressed_total(),
before_suppressed
);
}
#[tokio::test]
async fn remove_ultra_short_uptime_writer_skips_flap_quarantine() {
let pool = make_pool().await;
let writer_id = 931;
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 12, 0, 131)), 443);
let before_total = pool.stats.get_me_endpoint_quarantine_total();
let before_unexpected = pool.stats.get_me_endpoint_quarantine_unexpected_total();
insert_writer(
&pool,
writer_id,
2,
addr,
false,
Instant::now() - Duration::from_millis(50),
)
.await;
pool.remove_writer_and_close_clients(writer_id).await;
assert!(
!pool.is_endpoint_quarantined(addr).await,
"ultra-short unexpected lifetime must not quarantine endpoint"
);
assert_eq!(pool.stats.get_me_endpoint_quarantine_total(), before_total);
assert_eq!(
pool.stats.get_me_endpoint_quarantine_unexpected_total(),
before_unexpected + 1
);
}
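// Quarantine decision table exercised above: unexpected loss with normal
// uptime quarantines the endpoint; draining loss is suppressed; unexpected
// loss with ultra-short uptime is counted but not quarantined.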
#[tokio::test]

View File

@ -106,6 +106,8 @@ async fn make_pool() -> (Arc<MePool>, Arc<SecureRandom>) {
general.me_warn_rate_limit_ms,
general.me_route_no_writer_mode,
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
);

View File

@ -34,8 +34,6 @@ const NUM_DCS: usize = 5;
/// Timeout for an individual DC ping attempt.
const DC_PING_TIMEOUT_SECS: u64 = 5;
/// Timeout for direct TG DC TCP connect readiness.
const DIRECT_CONNECT_TIMEOUT_SECS: u64 = 10;
/// Interval between upstream health-check cycles.
const HEALTH_CHECK_INTERVAL_SECS: u64 = 30;
/// Timeout for a single health-check connect attempt.
@ -319,6 +317,8 @@ pub struct UpstreamManager {
connect_retry_attempts: u32,
connect_retry_backoff: Duration,
connect_budget: Duration,
/// Per-attempt TCP connect timeout to Telegram DC (`[timeouts] tg_connect`, seconds).
tg_connect_timeout_secs: u64,
unhealthy_fail_threshold: u32,
connect_failfast_hard_errors: bool,
no_upstreams_warn_epoch_ms: Arc<AtomicU64>,
@ -332,6 +332,7 @@ impl UpstreamManager {
connect_retry_attempts: u32,
connect_retry_backoff_ms: u64,
connect_budget_ms: u64,
tg_connect_timeout_secs: u64,
unhealthy_fail_threshold: u32,
connect_failfast_hard_errors: bool,
stats: Arc<Stats>,
@ -347,6 +348,7 @@ impl UpstreamManager {
connect_retry_attempts: connect_retry_attempts.max(1),
connect_retry_backoff: Duration::from_millis(connect_retry_backoff_ms),
connect_budget: Duration::from_millis(connect_budget_ms.max(1)),
tg_connect_timeout_secs: tg_connect_timeout_secs.max(1),
unhealthy_fail_threshold: unhealthy_fail_threshold.max(1),
connect_failfast_hard_errors,
no_upstreams_warn_epoch_ms: Arc::new(AtomicU64::new(0)),
@ -797,8 +799,8 @@ impl UpstreamManager {
break;
}
let remaining_budget = self.connect_budget.saturating_sub(elapsed);
let attempt_timeout =
Duration::from_secs(DIRECT_CONNECT_TIMEOUT_SECS).min(remaining_budget);
let attempt_timeout = Duration::from_secs(self.tg_connect_timeout_secs)
.min(remaining_budget);
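// The per-attempt cap is the configured tg_connect timeout, clamped to
// whatever remains of the overall connect budget; a zero result means the
// budget is already spent.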
if attempt_timeout.is_zero() {
last_error = Some(ProxyError::ConnectionTimeout {
addr: target.to_string(),
@ -1901,6 +1903,7 @@ mod tests {
1,
100,
1000,
10,
1,
false,
Arc::new(Stats::new()),