Add regression and security tests for relay quota and TLS stream handling

- Introduced regression tests for relay quota wake liveness to ensure proper handling of contention and wake events.
- Added adversarial tests validating quota-system behavior under stress and contention.
- Implemented security tests for the TLS stream to verify the preservation of pending plaintext during state transitions.
- Extended the pool writer tests to verify quarantine behavior and confirm that removed writers are also dropped from the registry.
- Included fuzz testing to assess the robustness of the quota and TLS handling mechanisms against unexpected inputs and states.
This commit is contained in:
David Osipov
2026-03-21 15:16:20 +04:00
parent 3b86a883b9
commit b930ea1ec5
16 changed files with 1790 additions and 34 deletions

View File

@@ -591,14 +591,9 @@ impl MePool {
if let Some(tx) = close_tx {
let _ = tx.send(WriterCommand::Close).await;
}
if let Some(addr) = removed_addr
&& let Some(uptime) = removed_uptime
{
// Quarantine flapping endpoints regardless of draining state.
self.maybe_quarantine_flapping_endpoint(addr, uptime).await;
}
if let Some(addr) = removed_addr {
if let Some(uptime) = removed_uptime {
// Quarantine flapping endpoints regardless of draining state.
self.maybe_quarantine_flapping_endpoint(addr, uptime).await;
}
if trigger_refill

View File

@@ -1,4 +1,4 @@
use std::collections::HashMap;
use std::collections::{HashMap, HashSet};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU32, AtomicU64, Ordering};
@@ -9,6 +9,7 @@ use tokio_util::sync::CancellationToken;
use super::codec::WriterCommand;
use super::pool::{MePool, MeWriter, WriterContour};
use super::registry::ConnMeta;
use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode};
use crate::crypto::SecureRandom;
use crate::network::probe::NetworkDecision;
@@ -141,6 +142,34 @@ async fn insert_writer(
pool.conn_count.fetch_add(1, Ordering::Relaxed);
}
/// Snapshot the set of writer ids currently registered in the pool.
async fn current_writer_ids(pool: &Arc<MePool>) -> HashSet<u64> {
    let mut ids = HashSet::new();
    for writer in pool.writers.read().await.iter() {
        ids.insert(writer.id);
    }
    ids
}
/// Register a fresh connection in the registry and bind it to `writer_id`.
/// Returns the new connection id; panics if the binding is rejected.
async fn bind_conn_to_writer(pool: &Arc<MePool>, writer_id: u64, port: u16) -> u64 {
    let (conn_id, _rx) = pool.registry.register().await;
    let meta = ConnMeta {
        target_dc: 2,
        client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), port),
        our_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443),
        proto_flags: 0,
    };
    let bound = pool.registry.bind_writer(conn_id, writer_id, meta).await;
    assert!(bound, "writer binding must succeed");
    conn_id
}
#[tokio::test]
async fn remove_draining_writer_still_quarantines_flapping_endpoint() {
let pool = make_pool().await;
@@ -174,3 +203,180 @@ async fn remove_draining_writer_still_quarantines_flapping_endpoint() {
);
assert_eq!(pool.conn_count.load(Ordering::Relaxed), 0);
}
#[tokio::test]
async fn positive_remove_writer_cleans_bound_registry_routes() {
    // A writer with one bound client: removing the writer must also drop the
    // client's registry route and reset the pool-wide connection counter.
    let pool = make_pool().await;
    let writer_id = 88;
    let endpoint = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 12, 0, 88)), 443);
    insert_writer(&pool, writer_id, 2, endpoint, false, Instant::now()).await;

    let conn_id = bind_conn_to_writer(&pool, writer_id, 7301).await;
    assert!(pool.registry.get_writer(conn_id).await.is_some());

    pool.remove_writer_and_close_clients(writer_id).await;

    assert!(pool.registry.get_writer(conn_id).await.is_none());
    assert!(!current_writer_ids(&pool).await.contains(&writer_id));
    assert_eq!(pool.conn_count.load(Ordering::Relaxed), 0);
}
#[tokio::test]
async fn negative_unknown_writer_removal_is_noop() {
    // Removing a writer id that was never inserted must leave every piece of
    // pool state untouched, including the quarantine counter.
    let pool = make_pool().await;
    let quarantine_before = pool.stats.get_me_endpoint_quarantine_total();

    pool.remove_writer_and_close_clients(9_999_001).await;

    assert!(pool.writers.read().await.is_empty());
    assert_eq!(pool.conn_count.load(Ordering::Relaxed), 0);
    assert_eq!(
        pool.stats.get_me_endpoint_quarantine_total(),
        quarantine_before
    );
}
#[tokio::test]
async fn edge_draining_only_detach_rejects_active_writer() {
    let pool = make_pool().await;
    let writer_id = 91;
    let endpoint = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 12, 0, 91)), 443);
    insert_writer(&pool, writer_id, 2, endpoint, false, Instant::now()).await;

    // The draining-only detach path must refuse a writer that is still active.
    let detached = pool.remove_draining_writer_hard_detach(writer_id).await;
    assert!(!detached, "active writer must not be detached by draining-only path");
    assert!(current_writer_ids(&pool).await.contains(&writer_id));
    assert_eq!(pool.conn_count.load(Ordering::Relaxed), 1);

    // Tear down through the regular removal path.
    pool.remove_writer_and_close_clients(writer_id).await;
}
#[tokio::test]
async fn adversarial_blackhat_single_remove_establishes_single_quarantine_entry() {
    // A draining writer with a one-second uptime looks like a flapping
    // endpoint; removing it must quarantine that endpoint exactly once.
    let pool = make_pool().await;
    let writer_id = 93;
    let endpoint = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 12, 0, 93)), 443);
    let created_at = Instant::now() - Duration::from_secs(1);
    insert_writer(&pool, writer_id, 2, endpoint, true, created_at).await;

    pool.remove_writer_and_close_clients(writer_id).await;

    assert!(pool.is_endpoint_quarantined(endpoint).await);
    assert_eq!(pool.endpoint_quarantine.lock().await.len(), 1);
}
#[tokio::test]
async fn integration_old_uptime_writer_does_not_trigger_flap_quarantine() {
    // A long-lived (30 s uptime) writer is not "flapping": its removal must
    // leave the quarantine counter and quarantine set untouched.
    let pool = make_pool().await;
    let writer_id = 94;
    let endpoint = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 12, 0, 94)), 443);
    let created_at = Instant::now() - Duration::from_secs(30);
    insert_writer(&pool, writer_id, 2, endpoint, false, created_at).await;

    let quarantine_before = pool.stats.get_me_endpoint_quarantine_total();
    pool.remove_writer_and_close_clients(writer_id).await;
    let quarantine_after = pool.stats.get_me_endpoint_quarantine_total();

    assert_eq!(quarantine_after, quarantine_before);
    assert!(!pool.is_endpoint_quarantined(endpoint).await);
}
#[tokio::test]
async fn light_fuzz_insert_remove_schedule_preserves_pool_invariants() {
    // Drive a deterministic pseudo-random insert/remove schedule and check the
    // pool's writer-id set and connection counter against a HashSet model
    // after every step.
    let pool = make_pool().await;
    let mut state = 0xA11C_E551_D00D_BAADu64;
    let mut expected = HashSet::<u64>::new();

    for _ in 0..240 {
        // Linear congruential step: keeps the schedule fully reproducible.
        state = state.wrapping_mul(6364136223846793005).wrapping_add(1);
        let writer_id = 1 + (state % 64);

        if ((state >> 17) & 1) == 0 {
            // Insert path; skip ids the model already tracks.
            if !expected.contains(&writer_id) {
                let octet = (writer_id % 250) as u8;
                let endpoint = SocketAddr::new(
                    IpAddr::V4(Ipv4Addr::new(127, 13, 0, octet.max(1))),
                    4000 + writer_id as u16,
                );
                let draining = ((state >> 33) & 1) == 1;
                // Draining writers get a short uptime, non-draining a long one.
                let age_secs = if draining { 1 } else { 30 };
                insert_writer(
                    &pool,
                    writer_id,
                    2,
                    endpoint,
                    draining,
                    Instant::now() - Duration::from_secs(age_secs),
                )
                .await;
                expected.insert(writer_id);
            }
        } else {
            pool.remove_writer_and_close_clients(writer_id).await;
            expected.remove(&writer_id);
        }

        assert_eq!(
            current_writer_ids(&pool).await,
            expected,
            "writer-id set must match model under fuzz schedule"
        );
        assert_eq!(pool.conn_count.load(Ordering::Relaxed) as usize, expected.len());
    }

    // Drain the survivors and confirm the pool ends completely empty.
    for writer_id in expected {
        pool.remove_writer_and_close_clients(writer_id).await;
    }
    assert!(pool.writers.read().await.is_empty());
    assert_eq!(pool.conn_count.load(Ordering::Relaxed), 0);
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn stress_parallel_duplicate_removals_are_idempotent() {
    // Eight tasks race duplicate and bogus removals over the same 48 writers;
    // removal must be idempotent and leave counters consistent afterwards.
    let pool = make_pool().await;

    for writer_id in 1..=48u64 {
        let endpoint = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(
                127,
                14,
                (writer_id / 250) as u8,
                (writer_id % 250) as u8,
            )),
            5000 + writer_id as u16,
        );
        let created_at = Instant::now() - Duration::from_secs(1);
        insert_writer(&pool, writer_id, 2, endpoint, true, created_at).await;
    }

    let handles: Vec<_> = (0..8u64)
        .map(|worker| {
            let pool = Arc::clone(&pool);
            tokio::spawn(async move {
                for writer_id in 1..=48u64 {
                    // Alternate by parity between real ids and ids that never
                    // existed, so duplicate and no-op removals interleave.
                    let target = if ((writer_id + worker) & 1) == 0 {
                        writer_id
                    } else {
                        100_000 + writer_id
                    };
                    pool.remove_writer_and_close_clients(target).await;
                }
            })
        })
        .collect();

    for handle in handles {
        handle.await.expect("stress remover task must not panic");
    }

    // Sweep any survivors; the pool must then be completely empty.
    for writer_id in 1..=48u64 {
        pool.remove_writer_and_close_clients(writer_id).await;
    }
    assert!(pool.writers.read().await.is_empty());
    assert_eq!(pool.conn_count.load(Ordering::Relaxed), 0);
}