Refactor proxy and transport modules for improved safety and performance

- Enhanced linting rules in `src/proxy/mod.rs` to enforce stricter code quality checks in production.
- Updated hash functions in `src/proxy/middle_relay.rs` for better efficiency.
- Added new security tests in `src/proxy/tests/middle_relay_stub_completion_security_tests.rs` to validate desynchronization behavior.
- Removed ignored test stubs in `src/proxy/tests/middle_relay_security_tests.rs` to clean up the test suite.
- Improved error handling and code readability in various transport modules, including `src/transport/middle_proxy/config_updater.rs` and `src/transport/middle_proxy/pool.rs`.
- Introduced new padding functions in `src/stream/frame_stream_padding_security_tests.rs` to ensure consistent behavior across different implementations.
- Adjusted TLS stream validation in `src/stream/tls_stream.rs` for better boundary checking.
- General code cleanup and dead code elimination across multiple files to enhance maintainability.
This commit is contained in:
David Osipov
2026-03-21 20:05:07 +04:00
parent a6c298b633
commit 4c32370b25
35 changed files with 794 additions and 174 deletions

View File

@@ -757,6 +757,284 @@ fn adversarial_parent_swap_after_check_is_blocked_by_anchored_open() {
);
}
#[cfg(unix)]
#[test]
fn anchored_open_nix_path_writes_expected_lines() {
    // Happy path: the anchored open must create the log file under the
    // allowed parent, and each append call must yield exactly one line,
    // including across a reopen of the same sanitized path.
    let pid = std::process::id();
    let base = std::env::current_dir()
        .expect("cwd must be available")
        .join("target")
        .join(format!("telemt-unknown-dc-anchored-open-ok-{}", pid));
    fs::create_dir_all(&base).expect("anchored-open-ok base must be creatable");
    let rel_candidate = format!(
        "target/telemt-unknown-dc-anchored-open-ok-{}/unknown-dc.log",
        pid
    );
    let sanitized =
        sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
    // Best-effort reset so earlier runs cannot leak lines into this one.
    let _ = fs::remove_file(&sanitized.resolved_path);
    let mut writer = open_unknown_dc_log_append_anchored(&sanitized)
        .expect("anchored open must create log file in allowed parent");
    append_unknown_dc_line(&mut writer, 31_200).expect("first append must succeed");
    let mut reopened = open_unknown_dc_log_append_anchored(&sanitized)
        .expect("anchored reopen must succeed for existing regular file");
    append_unknown_dc_line(&mut reopened, 31_201).expect("second append must succeed");
    let content =
        fs::read_to_string(&sanitized.resolved_path).expect("anchored log file must be readable");
    let lines: Vec<&str> = content
        .lines()
        .filter(|line| !line.trim().is_empty())
        .collect();
    assert_eq!(lines.len(), 2, "expected one line per anchored append call");
    assert!(
        lines.contains(&"dc_idx=31200") && lines.contains(&"dc_idx=31201"),
        "anchored append output must contain both expected dc_idx lines"
    );
}
#[cfg(unix)]
#[test]
fn anchored_open_parallel_appends_preserve_line_integrity() {
    // Spawn many concurrent writers against the same sanitized path and
    // verify no append is torn or interleaved: every persisted line keeps
    // its prefix and a parseable payload.
    let pid = std::process::id();
    let base = std::env::current_dir()
        .expect("cwd must be available")
        .join("target")
        .join(format!("telemt-unknown-dc-anchored-open-parallel-{}", pid));
    fs::create_dir_all(&base).expect("anchored-open-parallel base must be creatable");
    let rel_candidate = format!(
        "target/telemt-unknown-dc-anchored-open-parallel-{}/unknown-dc.log",
        pid
    );
    let sanitized =
        sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
    let _ = fs::remove_file(&sanitized.resolved_path);
    // Each worker performs its own anchored open plus a single append.
    let handles: Vec<_> = (0..64i16)
        .map(|idx| {
            let sanitized = sanitized.clone();
            std::thread::spawn(move || {
                let mut file = open_unknown_dc_log_append_anchored(&sanitized)
                    .expect("anchored open must succeed in worker");
                append_unknown_dc_line(&mut file, 32_000 + idx)
                    .expect("worker append must succeed");
            })
        })
        .collect();
    for handle in handles {
        handle.join().expect("worker must not panic");
    }
    let content = fs::read_to_string(&sanitized.resolved_path)
        .expect("parallel log file must be readable");
    let lines: Vec<&str> = content
        .lines()
        .filter(|line| !line.trim().is_empty())
        .collect();
    assert_eq!(lines.len(), 64, "expected one complete line per worker append");
    for line in lines {
        assert!(
            line.starts_with("dc_idx="),
            "line must keep dc_idx prefix and not be interleaved: {line}"
        );
        let parsed = line
            .strip_prefix("dc_idx=")
            .expect("prefix checked above")
            .parse::<i16>();
        assert!(
            parsed.is_ok(),
            "line payload must remain parseable i16 and not be corrupted: {line}"
        );
    }
}
#[cfg(unix)]
#[test]
fn anchored_open_creates_private_0600_file_permissions() {
    use std::os::unix::fs::PermissionsExt;
    // The anchored open path must create the log with owner-only read/write
    // bits (0600) so the forensic output is never group- or world-readable.
    let pid = std::process::id();
    let base = std::env::current_dir()
        .expect("cwd must be available")
        .join("target")
        .join(format!("telemt-unknown-dc-anchored-perms-{}", pid));
    fs::create_dir_all(&base).expect("anchored-perms base must be creatable");
    let rel_candidate = format!(
        "target/telemt-unknown-dc-anchored-perms-{}/unknown-dc.log",
        pid
    );
    let sanitized =
        sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
    let _ = fs::remove_file(&sanitized.resolved_path);
    // Scope the handle so it is closed before the metadata check.
    {
        let mut file = open_unknown_dc_log_append_anchored(&sanitized)
            .expect("anchored open must create file with restricted mode");
        append_unknown_dc_line(&mut file, 31_210).expect("initial append must succeed");
    }
    let mode = fs::metadata(&sanitized.resolved_path)
        .expect("created log file metadata must be readable")
        .permissions()
        .mode()
        & 0o777;
    assert_eq!(
        mode, 0o600,
        "anchored open must create unknown-dc log file with owner-only rw permissions"
    );
}
#[cfg(unix)]
#[test]
fn anchored_open_rejects_existing_symlink_target() {
    use std::os::unix::fs::symlink;
    // Security check: when the final path component is already a symlink,
    // the anchored open must fail closed (ELOOP) instead of following it to
    // a file outside the allowed parent directory.
    let base = std::env::current_dir()
        .expect("cwd must be available")
        .join("target")
        .join(format!(
            "telemt-unknown-dc-anchored-symlink-target-{}",
            std::process::id()
        ));
    fs::create_dir_all(&base).expect("anchored-symlink-target base must be creatable");
    let rel_candidate = format!(
        "target/telemt-unknown-dc-anchored-symlink-target-{}/unknown-dc.log",
        std::process::id()
    );
    let sanitized =
        sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
    let outside = std::env::temp_dir().join(format!(
        "telemt-unknown-dc-anchored-symlink-outside-{}.log",
        std::process::id()
    ));
    fs::write(&outside, "outside\n").expect("outside baseline file must be writable");
    let _ = fs::remove_file(&sanitized.resolved_path);
    symlink(&outside, &sanitized.resolved_path)
        .expect("target symlink for anchored-open rejection test must be creatable");
    let err = open_unknown_dc_log_append_anchored(&sanitized)
        .expect_err("anchored open must reject symlinked filename target");
    // Fix: previously this test leaked the symlink and the decoy file in the
    // shared system temp dir on every run. Clean both up best-effort before
    // the final assertion so artifacts are removed even when it fails.
    let _ = fs::remove_file(&sanitized.resolved_path);
    let _ = fs::remove_file(&outside);
    assert_eq!(
        err.raw_os_error(),
        Some(libc::ELOOP),
        "anchored open should fail closed with ELOOP on symlinked target"
    );
}
#[cfg(unix)]
#[test]
fn anchored_open_high_contention_multi_write_preserves_complete_lines() {
    // Hammer the anchored open/append path from many threads doing many
    // rounds each; every logical write must surface as exactly one intact,
    // unique line in the output file.
    const WORKERS: usize = 24;
    const ROUNDS: usize = 40;
    let base = std::env::current_dir()
        .expect("cwd must be available")
        .join("target")
        .join(format!(
            "telemt-unknown-dc-anchored-contention-{}",
            std::process::id()
        ));
    fs::create_dir_all(&base).expect("anchored-contention base must be creatable");
    let rel_candidate = format!(
        "target/telemt-unknown-dc-anchored-contention-{}/unknown-dc.log",
        std::process::id()
    );
    let sanitized =
        sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
    let _ = fs::remove_file(&sanitized.resolved_path);
    // Each worker re-opens the file for every round to stress the open path
    // itself, not just concurrent appends on a shared descriptor.
    let handles: Vec<_> = (0..WORKERS)
        .map(|worker| {
            let sanitized = sanitized.clone();
            std::thread::spawn(move || {
                for round in 0..ROUNDS {
                    let mut file = open_unknown_dc_log_append_anchored(&sanitized)
                        .expect("anchored open must succeed under contention");
                    // worker * ROUNDS + round <= 959, so the i16 add cannot wrap.
                    let dc_idx = 20_000i16.wrapping_add((worker * ROUNDS + round) as i16);
                    append_unknown_dc_line(&mut file, dc_idx)
                        .expect("each contention append must complete");
                }
            })
        })
        .collect();
    for handle in handles {
        handle.join().expect("contention worker must not panic");
    }
    let content = fs::read_to_string(&sanitized.resolved_path)
        .expect("contention output file must be readable");
    let lines: Vec<&str> = content
        .lines()
        .filter(|line| !line.trim().is_empty())
        .collect();
    assert_eq!(
        lines.len(),
        WORKERS * ROUNDS,
        "every contention append must produce exactly one line"
    );
    let mut seen = std::collections::HashSet::new();
    for line in lines {
        assert!(
            line.starts_with("dc_idx="),
            "line must preserve expected prefix under heavy contention: {line}"
        );
        let value = line
            .strip_prefix("dc_idx=")
            .expect("prefix validated")
            .parse::<i16>()
            .expect("line payload must remain parseable i16 under contention");
        seen.insert(value);
    }
    assert_eq!(
        seen.len(),
        WORKERS * ROUNDS,
        "contention output must not lose or duplicate logical writes"
    );
}
#[cfg(unix)]
#[test]
fn append_unknown_dc_line_returns_error_for_read_only_descriptor() {
    // Appending through a descriptor opened read-only must fail closed and
    // must not alter the persisted log content.
    let pid = std::process::id();
    let base = std::env::current_dir()
        .expect("cwd must be available")
        .join("target")
        .join(format!("telemt-unknown-dc-append-ro-{}", pid));
    fs::create_dir_all(&base).expect("append-ro base must be creatable");
    let rel_candidate = format!(
        "target/telemt-unknown-dc-append-ro-{}/unknown-dc.log",
        pid
    );
    let sanitized =
        sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
    // Seed one line so a (wrongly) successful append would be observable.
    fs::write(&sanitized.resolved_path, "seed\n").expect("seed file must be writable");
    let mut readonly = std::fs::OpenOptions::new()
        .read(true)
        .open(&sanitized.resolved_path)
        .expect("readonly file open must succeed");
    let result = append_unknown_dc_line(&mut readonly, 31_222);
    assert!(
        result.is_err(),
        "append on readonly descriptor must fail closed"
    );
    let content_after =
        fs::read_to_string(&sanitized.resolved_path).expect("seed file must remain readable");
    assert_eq!(
        nonempty_line_count(&content_after),
        1,
        "failed readonly append must not modify persisted unknown-dc log content"
    );
}
#[tokio::test]
async fn unknown_dc_absolute_log_path_writes_one_entry() {
let _guard = unknown_dc_test_lock()

View File

@@ -953,24 +953,6 @@ fn light_fuzz_desync_dedup_temporal_gate_behavior_is_stable() {
panic!("expected at least one post-window sample to re-emit forensic record");
}
#[test]
#[ignore = "Tracking for M-04: Verify should_emit_full_desync returns true on first occurrence and false on duplicate within window"]
// Tracked placeholder for M-04 dedup coverage; panics via `unimplemented!`
// if ever run without the `#[ignore]` gate.
fn should_emit_full_desync_filters_duplicates() {
    unimplemented!("Stub for M-04");
}
#[test]
#[ignore = "Tracking for M-04: Verify desync dedup eviction behaves correctly under map-full condition"]
// Tracked placeholder for M-04 eviction coverage; panics via `unimplemented!`
// if ever run without the `#[ignore]` gate.
fn desync_dedup_eviction_under_map_full_condition() {
    unimplemented!("Stub for M-04");
}
#[tokio::test]
#[ignore = "Tracking for M-05: Verify C2ME channel full path yields then sends under backpressure"]
// Tracked placeholder for M-05 backpressure coverage; panics via
// `unimplemented!` if ever run without the `#[ignore]` gate.
async fn c2me_channel_full_path_yields_then_sends() {
    unimplemented!("Stub for M-05");
}
fn make_forensics_state() -> RelayForensicsState {
RelayForensicsState {
trace_id: 1,

View File

@@ -0,0 +1,168 @@
use super::*;
use crate::stream::BufferPool;
use std::collections::HashSet;
use std::sync::Arc;
use tokio::time::{Duration as TokioDuration, timeout};
/// Copies `data` into a freshly pooled buffer so tests can hand owned
/// `PooledBuffer` payloads to channel commands. The backing pool slot is
/// sized to at least one byte so an empty payload still gets a valid buffer.
fn make_pooled_payload(data: &[u8]) -> PooledBuffer {
    let slot_size = data.len().max(1);
    let pool = Arc::new(BufferPool::with_config(slot_size, 4));
    let mut buffer = pool.get();
    buffer.resize(data.len(), 0);
    buffer[..data.len()].copy_from_slice(data);
    buffer
}
#[test]
// Fix: this test is the completed M-04 stub, yet it still carried
// `#[ignore = "Tracking for M-04..."]`, so CI silently skipped a finished
// security test. The stale attribute is removed so the test actually runs.
fn should_emit_full_desync_filters_duplicates() {
    // Serialize access to the process-global dedup map shared by these tests.
    let _guard = desync_dedup_test_lock()
        .lock()
        .expect("desync dedup test lock must be available");
    clear_desync_dedup_for_testing();
    let key = 0x4D04_0000_0000_0001_u64;
    let base = Instant::now();
    // First sighting of a key must always emit the full record.
    assert!(
        should_emit_full_desync(key, false, base),
        "first occurrence must emit full forensic record"
    );
    assert!(
        !should_emit_full_desync(key, false, base),
        "duplicate at same timestamp must be suppressed"
    );
    // Strictly inside the window: still deduplicated.
    let within_window = base + DESYNC_DEDUP_WINDOW - TokioDuration::from_millis(1);
    assert!(
        !should_emit_full_desync(key, false, within_window),
        "duplicate strictly inside dedup window must stay suppressed"
    );
    // Exactly at the boundary: the window has elapsed, so it re-emits.
    let on_window_edge = base + DESYNC_DEDUP_WINDOW;
    assert!(
        should_emit_full_desync(key, false, on_window_edge),
        "duplicate at window boundary must re-emit and refresh"
    );
}
#[test]
// Fix: this test is the completed M-04 eviction stub, yet it still carried
// `#[ignore = "Tracking for M-04..."]`, so CI silently skipped a finished
// security test. The stale attribute is removed so the test actually runs.
fn desync_dedup_eviction_under_map_full_condition() {
    // Serialize access to the process-global dedup map shared by these tests.
    let _guard = desync_dedup_test_lock()
        .lock()
        .expect("desync dedup test lock must be available");
    clear_desync_dedup_for_testing();
    let base = Instant::now();
    // Warm the cache to its hard cap with unique keys.
    for key in 0..DESYNC_DEDUP_MAX_ENTRIES as u64 {
        assert!(
            should_emit_full_desync(key, false, base),
            "unique key should be inserted while warming dedup cache"
        );
    }
    let dedup = DESYNC_DEDUP
        .get()
        .expect("dedup map must exist after warm-up insertions");
    assert_eq!(
        dedup.len(),
        DESYNC_DEDUP_MAX_ENTRIES,
        "cache warm-up must reach exact hard cap"
    );
    let before_keys: HashSet<u64> = dedup.iter().map(|entry| *entry.key()).collect();
    // A brand-new key at map-full must still emit via bounded eviction.
    let newcomer_key = 0x4D04_FFFF_FFFF_0001_u64;
    assert!(
        should_emit_full_desync(newcomer_key, false, base),
        "first newcomer at map-full must emit under bounded full-cache gate"
    );
    let after_keys: HashSet<u64> = dedup.iter().map(|entry| *entry.key()).collect();
    assert_eq!(
        dedup.len(),
        DESYNC_DEDUP_MAX_ENTRIES,
        "map-full insertion must preserve hard capacity bound"
    );
    assert!(
        after_keys.contains(&newcomer_key),
        "newcomer must be present after bounded eviction path"
    );
    // Exactly one victim out, exactly one newcomer in.
    let removed_count = before_keys.difference(&after_keys).count();
    let added_count = after_keys.difference(&before_keys).count();
    assert_eq!(
        removed_count, 1,
        "map-full insertion must evict exactly one prior key"
    );
    assert_eq!(
        added_count, 1,
        "map-full insertion must add exactly one newcomer key"
    );
    assert!(
        !should_emit_full_desync(newcomer_key, false, base),
        "immediate duplicate newcomer must remain suppressed"
    );
}
#[tokio::test]
// Fix: this test is the completed M-05 stub, yet it still carried
// `#[ignore = "Tracking for M-05..."]`, so CI silently skipped a finished
// backpressure test. The stale attribute is removed so the test actually runs.
async fn c2me_channel_full_path_yields_then_sends() {
    // Fill the capacity-1 channel so the next enqueue must block.
    let (tx, mut rx) = mpsc::channel::<C2MeCommand>(1);
    tx.send(C2MeCommand::Data {
        payload: make_pooled_payload(&[0xAA]),
        flags: 1,
    })
    .await
    .expect("priming queue with one frame must succeed");
    // The producer attempts a second frame; it must park until the consumer
    // drains one slot.
    let tx2 = tx.clone();
    let producer = tokio::spawn(async move {
        enqueue_c2me_command(
            &tx2,
            C2MeCommand::Data {
                payload: make_pooled_payload(&[0xBB, 0xCC]),
                flags: 2,
            },
        )
        .await
    });
    // Give the producer a chance to run and block on the full channel.
    tokio::task::yield_now().await;
    tokio::time::sleep(TokioDuration::from_millis(10)).await;
    assert!(
        !producer.is_finished(),
        "producer should stay pending while queue is full"
    );
    let first = timeout(TokioDuration::from_millis(100), rx.recv())
        .await
        .expect("receiver should observe primed frame")
        .expect("first queued command must exist");
    match first {
        C2MeCommand::Data { payload, flags } => {
            assert_eq!(payload.as_ref(), &[0xAA]);
            assert_eq!(flags, 1);
        }
        C2MeCommand::Close => panic!("unexpected close command as first item"),
    }
    // Draining one slot must have unblocked the producer's pending send.
    producer
        .await
        .expect("producer task must not panic")
        .expect("blocked enqueue must succeed once receiver drains capacity");
    let second = timeout(TokioDuration::from_millis(100), rx.recv())
        .await
        .expect("receiver should observe backpressure-resumed frame")
        .expect("second queued command must exist");
    match second {
        C2MeCommand::Data { payload, flags } => {
            assert_eq!(payload.as_ref(), &[0xBB, 0xCC]);
            assert_eq!(flags, 2);
        }
        C2MeCommand::Close => panic!("unexpected close command as second item"),
    }
}