Merge upstream/flow-sec into pr-sec-1

This commit is contained in:
David Osipov
2026-03-17 19:48:53 +04:00
8 changed files with 234 additions and 64 deletions

View File

@@ -160,6 +160,7 @@ pub struct MePool {
pub(super) refill_inflight: Arc<Mutex<HashSet<RefillEndpointKey>>>,
pub(super) refill_inflight_dc: Arc<Mutex<HashSet<RefillDcKey>>>,
pub(super) conn_count: AtomicUsize,
pub(super) draining_active_runtime: AtomicU64,
pub(super) stats: Arc<crate::stats::Stats>,
pub(super) generation: AtomicU64,
pub(super) active_generation: AtomicU64,
@@ -438,6 +439,7 @@ impl MePool {
refill_inflight: Arc::new(Mutex::new(HashSet::new())),
refill_inflight_dc: Arc::new(Mutex::new(HashSet::new())),
conn_count: AtomicUsize::new(0),
draining_active_runtime: AtomicU64::new(0),
generation: AtomicU64::new(1),
active_generation: AtomicU64::new(1),
warm_generation: AtomicU64::new(0),
@@ -690,6 +692,32 @@ impl MePool {
}
}
/// Current count of writers in the draining state.
/// Relaxed load: the counter is advisory (read for coverage/telemetry
/// decisions), not a synchronization point.
pub(super) fn draining_active_runtime(&self) -> u64 {
self.draining_active_runtime.load(Ordering::Relaxed)
}
/// Bump the draining-writer counter by one.
/// Called when a writer transitions into the draining state; paired with
/// `decrement_draining_active_runtime` on removal. Relaxed: advisory counter.
pub(super) fn increment_draining_active_runtime(&self) {
self.draining_active_runtime.fetch_add(1, Ordering::Relaxed);
}
/// Saturating decrement of the draining-writer counter: the count never
/// wraps below zero even if decrements race with each other.
pub(super) fn decrement_draining_active_runtime(&self) {
    // fetch_update runs the CAS retry loop internally; checked_sub(1)
    // returns None at 0, which aborts the update and leaves the counter
    // untouched — exactly the saturating behavior the manual loop had.
    let _ = self
        .draining_active_runtime
        .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| v.checked_sub(1));
}
/// Key selector of the currently loaded proxy secret.
/// Acquires the `proxy_secret` read lock (async), so this awaits if a
/// secret rotation holds the write lock.
pub(super) async fn key_selector(&self) -> u32 {
self.proxy_secret.read().await.key_selector
}

View File

@@ -141,6 +141,38 @@ impl MePool {
out
}
/// True when every DC that has at least one desired endpoint is served by
/// at least one writer that is not currently draining.
///
/// Vacuously true when no DC has desired endpoints. Takes the `writers`
/// read lock; drain flags are sampled with Relaxed loads (advisory check).
pub(super) async fn has_non_draining_writer_per_desired_dc_group(&self) -> bool {
    // DCs still lacking a non-draining writer; start with every DC that
    // has desired endpoints and subtract as we find coverage.
    let mut uncovered: HashSet<i32> = self
        .desired_dc_endpoints()
        .await
        .iter()
        .filter(|(_, endpoints)| !endpoints.is_empty())
        .map(|(dc, _)| *dc)
        .collect();
    if uncovered.is_empty() {
        // Nothing is required, so coverage holds trivially.
        return true;
    }
    let writers = self.writers.read().await;
    for w in writers.iter() {
        if w.draining.load(Ordering::Relaxed) {
            continue;
        }
        uncovered.remove(&w.writer_dc);
        if uncovered.is_empty() {
            return true;
        }
    }
    false
}
fn hardswap_warmup_connect_delay_ms(&self) -> u64 {
let min_ms = self.me_hardswap_warmup_delay_min_ms.load(Ordering::Relaxed);
let max_ms = self.me_hardswap_warmup_delay_max_ms.load(Ordering::Relaxed);
@@ -475,12 +507,30 @@ impl MePool {
coverage_ratio = format_args!("{coverage_ratio:.3}"),
min_ratio = format_args!("{min_ratio:.3}"),
drain_timeout_secs,
"ME reinit cycle covered; draining stale writers"
"ME reinit cycle covered; processing stale writers"
);
self.stats.increment_pool_swap_total();
let can_drop_with_replacement = self
.has_non_draining_writer_per_desired_dc_group()
.await;
if can_drop_with_replacement {
info!(
stale_writers = stale_writer_ids.len(),
"ME reinit stale writers: replacement coverage ready, force-closing clients for fast rebind"
);
} else {
warn!(
stale_writers = stale_writer_ids.len(),
"ME reinit stale writers: replacement coverage incomplete, keeping draining fallback"
);
}
for writer_id in stale_writer_ids {
self.mark_writer_draining_with_timeout(writer_id, drain_timeout, !hardswap)
.await;
if can_drop_with_replacement {
self.stats.increment_pool_force_close_total();
self.remove_writer_and_close_clients(writer_id).await;
}
}
if hardswap {
self.clear_pending_hardswap_state();

View File

@@ -514,6 +514,7 @@ impl MePool {
let was_draining = w.draining.load(Ordering::Relaxed);
if was_draining {
self.stats.decrement_pool_drain_active();
self.decrement_draining_active_runtime();
}
self.stats.increment_me_writer_removed_total();
w.cancel.cancel();
@@ -572,6 +573,7 @@ impl MePool {
.store(drain_deadline_epoch_secs, Ordering::Relaxed);
if !already_draining {
self.stats.increment_pool_drain_active();
self.increment_draining_active_runtime();
}
w.contour
.store(WriterContour::Draining.as_u8(), Ordering::Relaxed);

View File

@@ -436,6 +436,19 @@ impl ConnRegistry {
.map(|s| s.is_empty())
.unwrap_or(true)
}
pub(super) async fn non_empty_writer_ids(&self, writer_ids: &[u64]) -> HashSet<u64> {
let inner = self.inner.read().await;
let mut out = HashSet::<u64>::with_capacity(writer_ids.len());
for writer_id in writer_ids {
if let Some(conns) = inner.conns_for_writer.get(writer_id)
&& !conns.is_empty()
{
out.insert(*writer_id);
}
}
out
}
}
#[cfg(test)]
@@ -634,4 +647,35 @@ mod tests {
);
assert!(registry.get_writer(conn_id).await.is_none());
}
#[tokio::test]
async fn non_empty_writer_ids_returns_only_writers_with_bound_clients() {
    let registry = ConnRegistry::new();
    let (conn_id, _rx) = registry.register().await;

    // Two live writers; only writer 10 will get a client bound to it.
    let (tx_bound, _rx_bound) = tokio::sync::mpsc::channel(8);
    let (tx_idle, _rx_idle) = tokio::sync::mpsc::channel(8);
    registry.register_writer(10, tx_bound).await;
    registry.register_writer(20, tx_idle).await;

    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443);
    let meta = ConnMeta {
        target_dc: 2,
        client_addr: addr,
        our_addr: addr,
        proto_flags: 0,
    };
    assert!(registry.bind_writer(conn_id, 10, meta).await);

    // Writer 20 has no clients, and writer 30 was never registered:
    // only writer 10 should be reported as non-empty.
    let non_empty = registry.non_empty_writer_ids(&[10, 20, 30]).await;
    assert_eq!(non_empty.len(), 1);
    assert!(non_empty.contains(&10));
}
}