Add comprehensive TLS ClientHello size validation and adversarial tests

- Refactor existing tests to improve clarity and specificity in naming.
- Introduce new tests for minimum and maximum TLS ClientHello sizes, ensuring proper masking behavior for malformed probes.
- Implement differential timing tests to compare latency between malformed TLS and plain web requests, ensuring similar performance characteristics.
- Add adversarial tests for truncated TLS ClientHello probes, verifying that even malformed traffic is masked as legitimate responses.
- Enhance the overall test suite for robustness against probing attacks, focusing on edge cases and potential vulnerabilities in TLS handling.
This commit is contained in:
David Osipov 2026-03-20 20:30:02 +04:00
parent 1689b8a5dc
commit 801f670827
No known key found for this signature in database
GPG Key ID: 0E55C4A47454E82E
6 changed files with 1289 additions and 20 deletions

View File

@ -152,8 +152,14 @@ pub const TLS_RECORD_CHANGE_CIPHER: u8 = 0x14;
pub const TLS_RECORD_APPLICATION: u8 = 0x17;
/// TLS record type: Alert
pub const TLS_RECORD_ALERT: u8 = 0x15;
/// Maximum TLS record size
pub const MAX_TLS_RECORD_SIZE: usize = 16384;
/// Maximum TLS record size (RFC 8446 §5.1: MUST NOT exceed 2^14 = 16_384 bytes)
pub const MAX_TLS_RECORD_SIZE: usize = 16_384;
/// Structural minimum for a valid TLS 1.3 ClientHello with SNI.
/// Derived from RFC 8446 §4.1.2 field layout + Appendix D.4 compat mode.
/// Deliberately conservative (below any real client) to avoid false
/// positives on legitimate connections with compact extension sets.
pub const MIN_TLS_CLIENT_HELLO_SIZE: usize = 100;
/// Maximum TLS chunk size (with overhead)
/// RFC 8446 §5.2 allows up to 16384 + 256 bytes of ciphertext
pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 256;

View File

@ -110,6 +110,35 @@ fn wrap_tls_application_record(payload: &[u8]) -> Vec<u8> {
record
}
/// Returns true when `tls_len` is a structurally plausible ClientHello
/// record length: at least `MIN_TLS_CLIENT_HELLO_SIZE` and no more than
/// the RFC 8446 §5.1 record ceiling `MAX_TLS_RECORD_SIZE`.
fn tls_clienthello_len_in_bounds(tls_len: usize) -> bool {
    tls_len >= MIN_TLS_CLIENT_HELLO_SIZE && tls_len <= MAX_TLS_RECORD_SIZE
}
/// Reads from `reader` into `buf` until the buffer is full or EOF is hit,
/// returning the number of bytes actually read.
///
/// Unlike `read_exact`, a short read (EOF before the buffer fills) is not an
/// error: the caller gets the partial count and can decide how to classify
/// the truncated input (e.g. engage the masking fallback). Transient
/// `ErrorKind::Interrupted` errors are retried, mirroring the retry loop in
/// `std::io::Read::read_exact`; all other I/O errors are propagated.
async fn read_with_progress<R: AsyncRead + Unpin>(
    reader: &mut R,
    mut buf: &mut [u8],
) -> std::io::Result<usize> {
    let mut total = 0usize;
    while !buf.is_empty() {
        match reader.read(buf).await {
            // EOF: report how far we got instead of failing.
            Ok(0) => return Ok(total),
            Ok(n) => {
                total += n;
                // Shrink the window to the unfilled tail of the buffer.
                let (_, rest) = buf.split_at_mut(n);
                buf = rest;
            }
            // Interrupted reads are transient by contract; retry the read.
            Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
            Err(e) => return Err(e),
        }
    }
    Ok(total)
}
/// Computes the client-handshake timeout, granting masked deployments an
/// extra 750 ms grace so the masking fallback has time to relay a probe to
/// the decoy backend before the deadline fires.
fn handshake_timeout_with_mask_grace(config: &ProxyConfig) -> Duration {
    let base = Duration::from_secs(config.timeouts.client_handshake);
    match config.censorship.mask {
        true => base.saturating_add(Duration::from_millis(750)),
        false => base,
    }
}
fn record_beobachten_class(
beobachten: &BeobachtenStore,
config: &ProxyConfig,
@ -226,7 +255,7 @@ where
debug!(peer = %real_peer, "New connection (generic stream)");
let handshake_timeout = Duration::from_secs(config.timeouts.client_handshake);
let handshake_timeout = handshake_timeout_with_mask_grace(&config);
let stats_for_timeout = stats.clone();
let config_for_timeout = config.clone();
let beobachten_for_timeout = beobachten.clone();
@ -243,12 +272,15 @@ where
if is_tls {
let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
// RFC 8446 §5.1 mandates that TLSPlaintext records must not exceed 2^14
// bytes (16_384). A client claiming a larger record is non-compliant and
// may be an active probe attempting to force large allocations.
//
// Also enforce a minimum record size to avoid trivial/garbage probes.
if !(512..=MAX_TLS_RECORD_SIZE).contains(&tls_len) {
// RFC 8446 §5.1: TLS record payload MUST NOT exceed 2^14 (16_384) bytes.
// Lower bound is a structural minimum for a valid TLS 1.3 ClientHello
// (record header + handshake header + random + session_id + cipher_suites
// + compression + at least one extension with SNI). The previous value of
// 512 was implicitly coupled to TLS_REQUEST_LENGTH=517 from the official
// Telegram MTProxy reference server, leaving only a 5-byte margin and
// incorrectly rejecting compact but spec-compliant ClientHellos from
// third-party clients or future Telegram versions.
if !tls_clienthello_len_in_bounds(tls_len) {
debug!(peer = %real_peer, tls_len = tls_len, max_tls_len = MAX_TLS_RECORD_SIZE, "TLS handshake length out of bounds");
stats.increment_connects_bad();
let (reader, writer) = tokio::io::split(stream);
@ -267,7 +299,44 @@ where
let mut handshake = vec![0u8; 5 + tls_len];
handshake[..5].copy_from_slice(&first_bytes);
stream.read_exact(&mut handshake[5..]).await?;
let body_read = match read_with_progress(&mut stream, &mut handshake[5..]).await {
Ok(n) => n,
Err(e) => {
debug!(peer = %real_peer, error = %e, tls_len = tls_len, "TLS ClientHello body read failed; engaging masking fallback");
stats.increment_connects_bad();
let initial_len = 5;
let (reader, writer) = tokio::io::split(stream);
handle_bad_client(
reader,
writer,
&handshake[..initial_len],
real_peer,
local_addr,
&config,
&beobachten,
)
.await;
return Ok(HandshakeOutcome::Handled);
}
};
if body_read < tls_len {
debug!(peer = %real_peer, got = body_read, expected = tls_len, "Truncated in-range TLS ClientHello; engaging masking fallback");
stats.increment_connects_bad();
let initial_len = 5 + body_read;
let (reader, writer) = tokio::io::split(stream);
handle_bad_client(
reader,
writer,
&handshake[..initial_len],
real_peer,
local_addr,
&config,
&beobachten,
)
.await;
return Ok(HandshakeOutcome::Handled);
}
let (read_half, write_half) = tokio::io::split(stream);
@ -514,7 +583,7 @@ impl RunningClientHandler {
debug!(peer = %peer, error = %e, "Failed to configure client socket");
}
let handshake_timeout = Duration::from_secs(self.config.timeouts.client_handshake);
let handshake_timeout = handshake_timeout_with_mask_grace(&self.config);
let stats = self.stats.clone();
let config_for_timeout = self.config.clone();
let beobachten_for_timeout = self.beobachten.clone();
@ -651,9 +720,15 @@ impl RunningClientHandler {
debug!(peer = %peer, tls_len = tls_len, "Reading TLS handshake");
// See RFC 8446 §5.1: TLSPlaintext records must not exceed 16_384 bytes.
// Treat too-small or too-large lengths as active probes and mask them.
if !(512..=MAX_TLS_RECORD_SIZE).contains(&tls_len) {
// RFC 8446 §5.1: TLS record payload MUST NOT exceed 2^14 (16_384) bytes.
// Lower bound is a structural minimum for a valid TLS 1.3 ClientHello
// (record header + handshake header + random + session_id + cipher_suites
// + compression + at least one extension with SNI). The previous value of
// 512 was implicitly coupled to TLS_REQUEST_LENGTH=517 from the official
// Telegram MTProxy reference server, leaving only a 5-byte margin and
// incorrectly rejecting compact but spec-compliant ClientHellos from
// third-party clients or future Telegram versions.
if !tls_clienthello_len_in_bounds(tls_len) {
debug!(peer = %peer, tls_len = tls_len, max_tls_len = MAX_TLS_RECORD_SIZE, "TLS handshake length out of bounds");
self.stats.increment_connects_bad();
let (reader, writer) = self.stream.into_split();
@ -672,7 +747,43 @@ impl RunningClientHandler {
let mut handshake = vec![0u8; 5 + tls_len];
handshake[..5].copy_from_slice(&first_bytes);
self.stream.read_exact(&mut handshake[5..]).await?;
let body_read = match read_with_progress(&mut self.stream, &mut handshake[5..]).await {
Ok(n) => n,
Err(e) => {
debug!(peer = %peer, error = %e, tls_len = tls_len, "TLS ClientHello body read failed; engaging masking fallback");
self.stats.increment_connects_bad();
let (reader, writer) = self.stream.into_split();
handle_bad_client(
reader,
writer,
&handshake[..5],
peer,
local_addr,
&self.config,
&self.beobachten,
)
.await;
return Ok(HandshakeOutcome::Handled);
}
};
if body_read < tls_len {
debug!(peer = %peer, got = body_read, expected = tls_len, "Truncated in-range TLS ClientHello; engaging masking fallback");
self.stats.increment_connects_bad();
let initial_len = 5 + body_read;
let (reader, writer) = self.stream.into_split();
handle_bad_client(
reader,
writer,
&handshake[..initial_len],
peer,
local_addr,
&self.config,
&self.beobachten,
)
.await;
return Ok(HandshakeOutcome::Handled);
}
let config = self.config.clone();
let replay_checker = self.replay_checker.clone();
@ -1085,3 +1196,15 @@ mod adversarial_tests;
#[cfg(test)]
#[path = "client_tls_mtproto_fallback_security_tests.rs"]
mod tls_mtproto_fallback_security_tests;
#[cfg(test)]
#[path = "client_tls_clienthello_size_security_tests.rs"]
mod tls_clienthello_size_security_tests;
#[cfg(test)]
#[path = "client_tls_clienthello_truncation_adversarial_tests.rs"]
mod tls_clienthello_truncation_adversarial_tests;
#[cfg(test)]
#[path = "client_timing_profile_adversarial_tests.rs"]
mod timing_profile_adversarial_tests;

View File

@ -3546,10 +3546,16 @@ async fn oversized_tls_record_is_masked_in_client_handler_pipeline() {
}
#[tokio::test]
async fn tls_record_len_511_is_rejected_in_generic_stream_pipeline() {
async fn tls_record_len_min_minus_1_is_rejected_in_generic_stream_pipeline() {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let backend_addr = listener.local_addr().unwrap();
let probe = [0x16, 0x03, 0x01, 0x01, 0xff];
let probe = [
0x16,
0x03,
0x01,
(((MIN_TLS_CLIENT_HELLO_SIZE - 1) >> 8) & 0xff) as u8,
((MIN_TLS_CLIENT_HELLO_SIZE - 1) & 0xff) as u8,
];
let backend_reply = b"HTTP/1.1 400 Bad Request\r\nContent-Length: 0\r\n\r\n".to_vec();
let accept_task = tokio::spawn({
@ -3634,19 +3640,25 @@ async fn tls_record_len_511_is_rejected_in_generic_stream_pipeline() {
assert_eq!(
stats.get_connects_bad(),
bad_before + 1,
"TLS record length 511 must be rejected"
"TLS record length below minimum structural ClientHello size must be rejected"
);
}
#[tokio::test]
async fn tls_record_len_511_is_rejected_in_client_handler_pipeline() {
async fn tls_record_len_min_minus_1_is_rejected_in_client_handler_pipeline() {
let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let backend_addr = mask_listener.local_addr().unwrap();
let front_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let front_addr = front_listener.local_addr().unwrap();
let probe = [0x16, 0x03, 0x01, 0x01, 0xff];
let probe = [
0x16,
0x03,
0x01,
(((MIN_TLS_CLIENT_HELLO_SIZE - 1) >> 8) & 0xff) as u8,
((MIN_TLS_CLIENT_HELLO_SIZE - 1) & 0xff) as u8,
];
let backend_reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
let mask_accept_task = tokio::spawn({

View File

@ -0,0 +1,367 @@
//! Differential timing-profile adversarial tests.
//! Compare malformed in-range TLS truncation probes with plain web baselines,
//! ensuring masking behavior stays in similar latency buckets.
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::protocol::constants::MIN_TLS_CLIENT_HELLO_SIZE;
use std::net::SocketAddr;
use std::time::{Duration, Instant};
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
const REPLY_404: &[u8] = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n";
#[derive(Clone, Copy, Debug)]
enum ProbeClass {
MalformedTlsTruncation,
PlainWebBaseline,
}
/// Builds a minimal `UpstreamManager` with a single always-enabled direct
/// upstream, for tests that never need to reach a real Telegram backend.
fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the four positional `1` arguments are limits/counts on
        // UpstreamManager::new whose meaning is not visible in this file —
        // confirm against the constructor signature before changing them.
        1,
        1,
        1,
        1,
        false,
        stats,
    ))
}
fn malformed_tls_probe() -> Vec<u8> {
vec![
0x16,
0x03,
0x03,
((MIN_TLS_CLIENT_HELLO_SIZE >> 8) & 0xff) as u8,
(MIN_TLS_CLIENT_HELLO_SIZE & 0xff) as u8,
0x41,
]
}
/// A plain HTTP GET request used as the innocuous baseline against which
/// malformed TLS probe timings are compared.
fn plain_web_probe() -> Vec<u8> {
    Vec::from(&b"GET /timing-profile HTTP/1.1\r\nHost: front.example\r\n\r\n"[..])
}
/// Summarizes latency samples (milliseconds) as `(mean, min, p95, max)`.
///
/// The p95 is the nearest-rank value at index `floor(len * 0.95)`, clamped
/// into range, so small sample sets still yield a defined percentile.
///
/// # Panics
/// Panics with an explicit message when `samples_ms` is empty; previously an
/// empty slice produced an opaque out-of-bounds index panic.
fn summarize(samples_ms: &[u128]) -> (f64, u128, u128, u128) {
    assert!(
        !samples_ms.is_empty(),
        "summarize requires at least one latency sample"
    );
    let mut sorted = samples_ms.to_vec();
    sorted.sort_unstable();
    let sum: u128 = sorted.iter().copied().sum();
    let mean = sum as f64 / sorted.len() as f64;
    let min = sorted[0];
    let p95_idx = ((sorted.len() as f64) * 0.95).floor() as usize;
    let p95 = sorted[p95_idx.min(sorted.len() - 1)];
    let max = sorted[sorted.len() - 1];
    (mean, min, p95, max)
}
/// Runs one masked-probe round through `handle_client_stream` (the generic
/// in-memory duplex path) and returns the client-observed latency in ms:
/// spawn a decoy HTTP backend, point masking at it, send the probe for
/// `class`, and time until the decoy's 404 reply reaches the client.
async fn run_generic_once(class: ProbeClass) -> u128 {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let backend_reply = REPLY_404.to_vec();
    // Decoy backend: accept one connection, read the 5 forwarded probe
    // bytes, answer with a plausible web response.
    let accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut buf = [0u8; 5];
            stream.read_exact(&mut buf).await.unwrap();
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    if matches!(class, ProbeClass::PlainWebBaseline) {
        // With both MTProto modes off, plain HTTP cannot match any handshake
        // path and is guaranteed to hit the masking fallback as well.
        cfg.general.modes.classic = false;
        cfg.general.modes.secure = false;
    }
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // In-memory duplex pipe stands in for a TCP socket.
    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "203.0.113.210:55110".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        config,
        stats,
        upstream_manager,
        replay_checker,
        buffer_pool,
        rng,
        None,
        route_runtime,
        None,
        ip_tracker,
        beobachten,
        false,
    ));
    let probe = match class {
        ProbeClass::MalformedTlsTruncation => malformed_tls_probe(),
        ProbeClass::PlainWebBaseline => plain_web_probe(),
    };
    // Timing window starts at the first probe byte and ends when the decoy
    // reply is fully observed by the client.
    let started = Instant::now();
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    let mut observed = vec![0u8; REPLY_404.len()];
    tokio::time::timeout(Duration::from_secs(2), client_side.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, REPLY_404);
    tokio::time::timeout(Duration::from_secs(2), accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), handler)
        .await
        .unwrap()
        .unwrap();
    started.elapsed().as_millis()
}
/// Same measurement as `run_generic_once`, but through the real
/// `ClientHandler` over actual loopback TCP sockets (front listener plus a
/// decoy mask backend). Returns the client-observed latency in ms.
async fn run_client_handler_once(class: ProbeClass) -> u128 {
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = mask_listener.local_addr().unwrap();
    let front_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let front_addr = front_listener.local_addr().unwrap();
    let backend_reply = REPLY_404.to_vec();
    // Decoy mask backend: one accept, read the forwarded 5-byte header,
    // reply like a real web server.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut buf = [0u8; 5];
            stream.read_exact(&mut buf).await.unwrap();
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    if matches!(class, ProbeClass::PlainWebBaseline) {
        // Disable MTProto modes so the HTTP baseline also takes the mask path.
        cfg.general.modes.classic = false;
        cfg.general.modes.secure = false;
    }
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // Server side: accept one front connection and drive it through the
    // full ClientHandler pipeline.
    let server_task = {
        let config = config.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let route_runtime = route_runtime.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        tokio::spawn(async move {
            let (stream, peer) = front_listener.accept().await.unwrap();
            let real_peer_report = Arc::new(std::sync::Mutex::new(None));
            ClientHandler::new(
                stream,
                peer,
                config,
                stats,
                upstream_manager,
                replay_checker,
                buffer_pool,
                rng,
                None,
                route_runtime,
                None,
                ip_tracker,
                beobachten,
                false,
                real_peer_report,
            )
            .run()
            .await
        })
    };
    let probe = match class {
        ProbeClass::MalformedTlsTruncation => malformed_tls_probe(),
        ProbeClass::PlainWebBaseline => plain_web_probe(),
    };
    let mut client = TcpStream::connect(front_addr).await.unwrap();
    // Timing starts at first probe byte, ends when the decoy reply arrives.
    let started = Instant::now();
    client.write_all(&probe).await.unwrap();
    client.shutdown().await.unwrap();
    let mut observed = vec![0u8; REPLY_404.len()];
    tokio::time::timeout(Duration::from_secs(2), client.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, REPLY_404);
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), server_task)
        .await
        .unwrap()
        .unwrap();
    started.elapsed().as_millis()
}
/// Differential timing check (generic stream path): malformed TLS truncation
/// probes must land in the same coarse latency buckets as plain-web baseline
/// requests, otherwise timing distinguishes proxy from decoy.
#[tokio::test]
async fn differential_timing_generic_malformed_tls_vs_plain_web_mask_profile_similar() {
    const ITER: usize = 24;
    // 20 ms buckets deliberately coarse to tolerate CI scheduling jitter.
    const BUCKET_MS: u128 = 20;
    let mut malformed = Vec::with_capacity(ITER);
    let mut plain = Vec::with_capacity(ITER);
    // Interleave the two classes so ambient load affects both equally.
    for _ in 0..ITER {
        malformed.push(run_generic_once(ProbeClass::MalformedTlsTruncation).await);
        plain.push(run_generic_once(ProbeClass::PlainWebBaseline).await);
    }
    let (m_mean, m_min, m_p95, m_max) = summarize(&malformed);
    let (p_mean, p_min, p_p95, p_max) = summarize(&plain);
    println!(
        "TIMING_DIFF generic class=malformed mean_ms={:.2} min_ms={} p95_ms={} max_ms={} bucket_mean={} bucket_p95={}",
        m_mean,
        m_min,
        m_p95,
        m_max,
        (m_mean as u128) / BUCKET_MS,
        m_p95 / BUCKET_MS
    );
    println!(
        "TIMING_DIFF generic class=plain_web mean_ms={:.2} min_ms={} p95_ms={} max_ms={} bucket_mean={} bucket_p95={}",
        p_mean,
        p_min,
        p_p95,
        p_max,
        (p_mean as u128) / BUCKET_MS,
        p_p95 / BUCKET_MS
    );
    // NOTE(review): `m_mean as i128` truncates the fractional milliseconds
    // before the delta is taken — sub-ms differences are intentionally (?)
    // ignored; confirm this coarsening is desired.
    let mean_bucket_delta = ((m_mean as i128) - (p_mean as i128)).abs() / (BUCKET_MS as i128);
    let p95_bucket_delta = ((m_p95 as i128) - (p_p95 as i128)).abs() / (BUCKET_MS as i128);
    assert!(
        mean_bucket_delta <= 1,
        "generic timing mean diverged: malformed_mean_ms={:.2}, plain_mean_ms={:.2}",
        m_mean,
        p_mean
    );
    assert!(
        p95_bucket_delta <= 2,
        "generic timing p95 diverged: malformed_p95_ms={}, plain_p95_ms={}",
        m_p95,
        p_p95
    );
}
/// Differential timing check (real ClientHandler over loopback TCP): same
/// bucket-similarity expectation as the generic-stream variant, with fewer
/// iterations because each round opens real sockets.
#[tokio::test]
async fn differential_timing_client_handler_malformed_tls_vs_plain_web_mask_profile_similar() {
    const ITER: usize = 16;
    // 20 ms buckets deliberately coarse to tolerate CI scheduling jitter.
    const BUCKET_MS: u128 = 20;
    let mut malformed = Vec::with_capacity(ITER);
    let mut plain = Vec::with_capacity(ITER);
    // Interleave the two classes so ambient load affects both equally.
    for _ in 0..ITER {
        malformed.push(run_client_handler_once(ProbeClass::MalformedTlsTruncation).await);
        plain.push(run_client_handler_once(ProbeClass::PlainWebBaseline).await);
    }
    let (m_mean, m_min, m_p95, m_max) = summarize(&malformed);
    let (p_mean, p_min, p_p95, p_max) = summarize(&plain);
    println!(
        "TIMING_DIFF handler class=malformed mean_ms={:.2} min_ms={} p95_ms={} max_ms={} bucket_mean={} bucket_p95={}",
        m_mean,
        m_min,
        m_p95,
        m_max,
        (m_mean as u128) / BUCKET_MS,
        m_p95 / BUCKET_MS
    );
    println!(
        "TIMING_DIFF handler class=plain_web mean_ms={:.2} min_ms={} p95_ms={} max_ms={} bucket_mean={} bucket_p95={}",
        p_mean,
        p_min,
        p_p95,
        p_max,
        (p_mean as u128) / BUCKET_MS,
        p_p95 / BUCKET_MS
    );
    // NOTE(review): the f64 means are truncated to whole ms before the delta
    // is computed — sub-ms divergence is ignored by design (?); confirm.
    let mean_bucket_delta = ((m_mean as i128) - (p_mean as i128)).abs() / (BUCKET_MS as i128);
    let p95_bucket_delta = ((m_p95 as i128) - (p_p95 as i128)).abs() / (BUCKET_MS as i128);
    assert!(
        mean_bucket_delta <= 1,
        "handler timing mean diverged: malformed_mean_ms={:.2}, plain_mean_ms={:.2}",
        m_mean,
        p_mean
    );
    assert!(
        p95_bucket_delta <= 2,
        "handler timing p95 diverged: malformed_p95_ms={}, plain_p95_ms={}",
        m_p95,
        p_p95
    );
}

View File

@ -0,0 +1,200 @@
//! TLS ClientHello size validation tests for proxy anti-censorship security
//! Covers positive, negative, edge, adversarial, and fuzz cases.
//! Ensures proxy does not reveal itself on probe failures.
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::protocol::constants::{MAX_TLS_RECORD_SIZE, MIN_TLS_CLIENT_HELLO_SIZE};
use std::net::SocketAddr;
use std::time::Duration;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpListener;
/// Encodes a 5-byte TLS handshake record header (type 0x16, legacy version
/// 0x0303) whose length field advertises `len` bytes. Only the low 16 bits
/// of `len` are encoded, matching the wire format's u16 length field.
fn test_probe_for_len(len: usize) -> [u8; 5] {
    let hi = ((len >> 8) & 0xff) as u8;
    let lo = (len & 0xff) as u8;
    [0x16, 0x03, 0x03, hi, lo]
}
/// Builds a minimal `UpstreamManager` with a single always-enabled direct
/// upstream, for tests that never need to reach a real Telegram backend.
fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the four positional `1` arguments are limits/counts on
        // UpstreamManager::new whose meaning is not visible in this file —
        // confirm against the constructor signature before changing them.
        1,
        1,
        1,
        1,
        false,
        stats,
    ))
}
/// Sends a 5-byte TLS header advertising `len` through the generic stream
/// pipeline and asserts the masking contract: the decoy backend receives the
/// original probe bytes verbatim, the client sees only the decoy's reply,
/// and `connects_bad` moves by exactly one when `expect_bad_increment`.
async fn run_probe_and_assert_masking(len: usize, expect_bad_increment: bool) {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();
    let probe = test_probe_for_len(len);
    let backend_reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    // Decoy backend: verifies byte-for-byte forwarding of the probe header.
    let accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe, "mask backend must receive original probe bytes");
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    // Snapshot the counter so the assertion tolerates a shared Stats impl.
    let bad_before = stats.get_connects_bad();
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // In-memory duplex pipe stands in for a TCP socket.
    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "203.0.113.123:55123".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        config,
        stats.clone(),
        upstream_manager,
        replay_checker,
        buffer_pool,
        rng,
        None,
        route_runtime,
        None,
        ip_tracker,
        beobachten,
        false,
    ));
    client_side.write_all(&probe).await.unwrap();
    let mut observed = vec![0u8; backend_reply.len()];
    client_side.read_exact(&mut observed).await.unwrap();
    assert_eq!(observed, backend_reply, "invalid TLS path must be masked as a real site");
    // Dropping the client half closes the pipe so the handler can finish.
    drop(client_side);
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    accept_task.await.unwrap();
    let expected_bad = if expect_bad_increment { bad_before + 1 } else { bad_before };
    assert_eq!(
        stats.get_connects_bad(),
        expected_bad,
        "unexpected connects_bad classification for tls_len={len}"
    );
}
/// One byte below the structural minimum must be masked and counted bad.
#[tokio::test]
async fn tls_client_hello_lower_bound_minus_one_is_masked_and_counted_bad() {
    run_probe_and_assert_masking(MIN_TLS_CLIENT_HELLO_SIZE - 1, true).await;
}
/// One byte above the RFC 8446 record ceiling must be masked and counted bad.
#[tokio::test]
async fn tls_client_hello_upper_bound_plus_one_is_masked_and_counted_bad() {
    run_probe_and_assert_masking(MAX_TLS_RECORD_SIZE + 1, true).await;
}
/// A header advertising a zero-length record must be masked and counted bad.
#[tokio::test]
async fn tls_client_hello_header_zero_len_is_masked_and_counted_bad() {
    run_probe_and_assert_masking(0, true).await;
}
/// Adversarial sweep across the ClientHello length-validation boundary:
/// both edges of the accepted range plus degenerate and overflow-ish values.
#[test]
fn tls_client_hello_len_bounds_unit_adversarial_sweep() {
    // (candidate record length, expected in-bounds verdict)
    let cases: &[(usize, bool)] = &[
        (0, false),
        (1, false),
        (99, false),
        (100, true),
        (101, true),
        (511, true),
        (512, true),
        (16_383, true),
        (16_384, true),
        (16_385, false),
        (u16::MAX as usize, false),
        (usize::MAX, false),
    ];
    for &(len, expected) in cases {
        assert_eq!(
            tls_clienthello_len_in_bounds(len),
            expected,
            "unexpected bounds result for tls_len={len}"
        );
    }
}
/// Deterministic light fuzz of the bounds predicate: a fixed-seed LCG
/// (the classic Numerical Recipes constants 1664525 / 1013904223) generates
/// lengths, with every eighth draws biased onto the exact boundary values,
/// and each verdict is cross-checked against the inclusive-range definition.
#[test]
fn tls_client_hello_len_bounds_light_fuzz_deterministic_lcg() {
    let mut x: u32 = 0xA5A5_5A5A;
    for _ in 0..2_048 {
        x = x.wrapping_mul(1_664_525).wrapping_add(1_013_904_223);
        // Random in-range-ish candidate: low 14 bits, i.e. 0..=16_383.
        let base = (x as usize) & 0x3fff;
        // Bias 6 of 8 draws onto the boundary values; the rest use `base`.
        let len = match x & 0x7 {
            0 => MIN_TLS_CLIENT_HELLO_SIZE - 1,
            1 => MIN_TLS_CLIENT_HELLO_SIZE,
            2 => MIN_TLS_CLIENT_HELLO_SIZE + 1,
            3 => MAX_TLS_RECORD_SIZE - 1,
            4 => MAX_TLS_RECORD_SIZE,
            5 => MAX_TLS_RECORD_SIZE + 1,
            _ => base,
        };
        // Independent oracle: the inclusive range spelled out directly.
        let expect_bad = !(MIN_TLS_CLIENT_HELLO_SIZE..=MAX_TLS_RECORD_SIZE).contains(&len);
        assert_eq!(
            tls_clienthello_len_in_bounds(len),
            !expect_bad,
            "deterministic fuzz mismatch for tls_len={len}"
        );
    }
}
/// Hammers the bounds predicate on both edges of the accepted range to
/// confirm it is pure, deterministic, and cheap under repetition.
#[test]
fn tls_client_hello_len_bounds_stress_many_evaluations() {
    for _ in 0..100_000 {
        for len in [MIN_TLS_CLIENT_HELLO_SIZE, MAX_TLS_RECORD_SIZE] {
            assert!(tls_clienthello_len_in_bounds(len));
        }
        for len in [MIN_TLS_CLIENT_HELLO_SIZE - 1, MAX_TLS_RECORD_SIZE + 1] {
            assert!(!tls_clienthello_len_in_bounds(len));
        }
    }
}
/// Repeated undersized probes must be masked and counted bad every time —
/// guards against per-connection state leaking across probe attempts.
#[tokio::test]
async fn tls_client_hello_masking_integration_repeated_small_probes() {
    for _ in 0..25 {
        run_probe_and_assert_masking(MIN_TLS_CLIENT_HELLO_SIZE - 1, true).await;
    }
}

View File

@ -0,0 +1,561 @@
//! Black-hat adversarial tests for truncated in-range TLS ClientHello probes.
//! These tests encode a strict anti-probing expectation: malformed TLS traffic
//! should still be masked as a legitimate website response.
use super::*;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::protocol::constants::MIN_TLS_CLIENT_HELLO_SIZE;
use std::net::SocketAddr;
use std::time::Duration;
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::time::sleep;
fn in_range_probe_header() -> [u8; 5] {
[
0x16,
0x03,
0x03,
((MIN_TLS_CLIENT_HELLO_SIZE >> 8) & 0xff) as u8,
(MIN_TLS_CLIENT_HELLO_SIZE & 0xff) as u8,
]
}
/// Builds a minimal `UpstreamManager` with a single always-enabled direct
/// upstream, for tests that never need to reach a real Telegram backend.
fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the four positional `1` arguments are limits/counts on
        // UpstreamManager::new whose meaning is not visible in this file —
        // confirm against the constructor signature before changing them.
        1,
        1,
        1,
        1,
        false,
        stats,
    ))
}
/// Builds a truncated probe: the in-range header followed by only
/// `actual_body_len` filler bytes (0x41) — fewer than the header advertises.
fn truncated_in_range_record(actual_body_len: usize) -> Vec<u8> {
    let mut record = Vec::with_capacity(5 + actual_body_len);
    record.extend_from_slice(&in_range_probe_header());
    record.resize(5 + actual_body_len, 0x41);
    record
}
/// Writes `bytes` fragmented into the sizes listed in `chunks`, sleeping
/// `delay_ms` after each listed chunk. Any tail of `bytes` not covered by
/// `chunks` is flushed at the end with no trailing delay.
async fn write_fragmented<W: AsyncWriteExt + Unpin>(
    writer: &mut W,
    bytes: &[u8],
    chunks: &[usize],
    delay_ms: u64,
) {
    let mut cursor = 0usize;
    for &size in chunks {
        if cursor >= bytes.len() {
            break;
        }
        let next = bytes.len().min(cursor + size);
        writer.write_all(&bytes[cursor..next]).await.unwrap();
        cursor = next;
        if delay_ms > 0 {
            sleep(Duration::from_millis(delay_ms)).await;
        }
    }
    if cursor < bytes.len() {
        writer.write_all(&bytes[cursor..]).await.unwrap();
    }
}
/// Drives one fragmented adversarial payload through the generic stream
/// pipeline and asserts the full masking contract: the decoy backend sees
/// the original 5-byte header, and the client sees only `backend_reply`.
/// `chunks`/`delay_ms` control how the payload is fragmented on the wire.
async fn run_blackhat_generic_fragmented_probe_should_mask(
    payload: Vec<u8>,
    chunks: &[usize],
    delay_ms: u64,
    backend_reply: Vec<u8>,
) {
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let mask_addr = mask_listener.local_addr().unwrap();
    let probe_header = in_range_probe_header();
    // Decoy backend: verifies the forwarded header byte-for-byte, then
    // answers like a real web server.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe_header);
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // In-memory duplex pipe stands in for a TCP socket.
    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "203.0.113.202:55002".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        config,
        stats,
        upstream_manager,
        replay_checker,
        buffer_pool,
        rng,
        None,
        route_runtime,
        None,
        ip_tracker,
        beobachten,
        false,
    ));
    write_fragmented(&mut client_side, &payload, chunks, delay_ms).await;
    client_side.shutdown().await.unwrap();
    let mut observed = vec![0u8; backend_reply.len()];
    tokio::time::timeout(Duration::from_secs(2), client_side.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, backend_reply);
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), handler)
        .await
        .unwrap()
        .unwrap();
}
/// Same fragmented-probe masking contract as the generic-stream variant, but
/// through the real `ClientHandler` over loopback TCP sockets (front listener
/// plus a decoy mask backend).
async fn run_blackhat_client_handler_fragmented_probe_should_mask(
    payload: Vec<u8>,
    chunks: &[usize],
    delay_ms: u64,
    backend_reply: Vec<u8>,
) {
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let mask_addr = mask_listener.local_addr().unwrap();
    let front_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let front_addr = front_listener.local_addr().unwrap();
    let probe_header = in_range_probe_header();
    // Decoy backend: verifies the forwarded header byte-for-byte, then
    // answers like a real web server.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe_header);
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // Server side: accept one front connection and run the full handler.
    let server_task = {
        let config = config.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let route_runtime = route_runtime.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        tokio::spawn(async move {
            let (stream, peer) = front_listener.accept().await.unwrap();
            let real_peer_report = Arc::new(std::sync::Mutex::new(None));
            ClientHandler::new(
                stream,
                peer,
                config,
                stats,
                upstream_manager,
                replay_checker,
                buffer_pool,
                rng,
                None,
                route_runtime,
                None,
                ip_tracker,
                beobachten,
                false,
                real_peer_report,
            )
            .run()
            .await
        })
    };
    let mut client = TcpStream::connect(front_addr).await.unwrap();
    write_fragmented(&mut client, &payload, chunks, delay_ms).await;
    client.shutdown().await.unwrap();
    let mut observed = vec![0u8; backend_reply.len()];
    tokio::time::timeout(Duration::from_secs(2), client.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, backend_reply);
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), server_task)
        .await
        .unwrap()
        .unwrap();
}
#[tokio::test]
async fn blackhat_truncated_in_range_clienthello_generic_stream_should_mask_but_leaks() {
    // Adversarial probe: a bare 5-byte record header (presumably a TLS record
    // header whose declared length is inside the valid ClientHello range — see
    // in_range_probe_header; TODO confirm) with no body, followed by a write-side
    // shutdown. The proxy must forward the bytes to the mask backend and relay
    // its HTTP answer, so the probe is indistinguishable from a plain web hit.
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let mask_addr = mask_listener.local_addr().unwrap();
    let backend_reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    let probe = in_range_probe_header();
    // Mask backend: expects exactly the 5 probe bytes relayed by the proxy,
    // then answers with the canned HTTP response above.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe);
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    // Masking enabled, pointed at the local TCP backend; the 1s handshake
    // timeout keeps the test fast should the handler stall instead of masking.
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // Drive the generic stream entry point over an in-memory duplex pipe
    // instead of a real socket.
    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "203.0.113.201:55001".parse().unwrap();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        config,
        stats,
        upstream_manager,
        replay_checker,
        buffer_pool,
        rng,
        None,
        route_runtime,
        None,
        ip_tracker,
        beobachten,
        false,
    ));
    // Send only the record header, then half-close: shutdown() closes our write
    // side but leaves the read side open for the masked response.
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();
    // Security expectation: even malformed in-range TLS should be masked.
    // Current code leaks by returning EOF/timeout instead of masking.
    let mut observed = vec![0u8; backend_reply.len()];
    tokio::time::timeout(Duration::from_secs(2), client_side.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, backend_reply);
    // Join the backend task (propagates its internal assert) and the handler.
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), handler)
        .await
        .unwrap()
        .unwrap();
}
#[tokio::test]
async fn blackhat_truncated_in_range_clienthello_client_handler_should_mask_but_leaks() {
    // Same truncated-header probe as the generic-stream variant, but exercised
    // end-to-end through ClientHandler::run() over real TCP sockets: front
    // listener accepts the "attacker", mask listener plays the decoy web server.
    let mask_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let mask_addr = mask_listener.local_addr().unwrap();
    let front_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let front_addr = front_listener.local_addr().unwrap();
    let backend_reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    let probe = in_range_probe_header();
    // Mask backend: must receive exactly the 5 probe bytes, then reply.
    let mask_accept_task = tokio::spawn({
        let backend_reply = backend_reply.clone();
        async move {
            let (mut stream, _) = mask_listener.accept().await.unwrap();
            let mut got = [0u8; 5];
            stream.read_exact(&mut got).await.unwrap();
            assert_eq!(got, probe);
            stream.write_all(&backend_reply).await.unwrap();
        }
    });
    // Masking enabled toward the local backend; short handshake timeout so a
    // non-masking (leaking) handler fails the test quickly.
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_addr.port();
    cfg.censorship.mask_proxy_protocol = 0;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let upstream_manager = make_test_upstream_manager(stats.clone());
    let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
    let buffer_pool = Arc::new(BufferPool::new());
    let rng = Arc::new(SecureRandom::new());
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    let ip_tracker = Arc::new(UserIpTracker::new());
    let beobachten = Arc::new(BeobachtenStore::new());
    // Server side: accept one connection on the front listener and run the
    // full ClientHandler state machine on it.
    let server_task = {
        let config = config.clone();
        let stats = stats.clone();
        let upstream_manager = upstream_manager.clone();
        let replay_checker = replay_checker.clone();
        let buffer_pool = buffer_pool.clone();
        let rng = rng.clone();
        let route_runtime = route_runtime.clone();
        let ip_tracker = ip_tracker.clone();
        let beobachten = beobachten.clone();
        tokio::spawn(async move {
            let (stream, peer) = front_listener.accept().await.unwrap();
            let real_peer_report = Arc::new(std::sync::Mutex::new(None));
            ClientHandler::new(
                stream,
                peer,
                config,
                stats,
                upstream_manager,
                replay_checker,
                buffer_pool,
                rng,
                None,
                route_runtime,
                None,
                ip_tracker,
                beobachten,
                false,
                real_peer_report,
            )
            .run()
            .await
        })
    };
    // Attacker side: header-only probe, then half-close the write direction
    // (the read side stays open to observe the masked response).
    let mut client = TcpStream::connect(front_addr).await.unwrap();
    client.write_all(&probe).await.unwrap();
    client.shutdown().await.unwrap();
    // Security expectation: malformed in-range TLS should still be masked.
    let mut observed = vec![0u8; backend_reply.len()];
    tokio::time::timeout(Duration::from_secs(2), client.read_exact(&mut observed))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(observed, backend_reply);
    // Join both tasks; the backend task's internal assert propagates here.
    tokio::time::timeout(Duration::from_secs(2), mask_accept_task)
        .await
        .unwrap()
        .unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), server_task)
        .await
        .unwrap()
        .unwrap();
}
#[tokio::test]
async fn blackhat_generic_truncated_min_body_1_should_mask_but_leaks() {
    // One 6-byte write (header + 1 body byte), no inter-chunk delay: the
    // generic stream path must still mask with a plain web response.
    let probe = truncated_in_range_record(1);
    let expected = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(probe, &[6], 0, expected).await;
}
#[tokio::test]
async fn blackhat_generic_truncated_min_body_8_should_mask_but_leaks() {
    // One 13-byte write (header + 8 body bytes) delivered at once, then close.
    let expected_reply = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    let chunks = [13usize];
    run_blackhat_generic_fragmented_probe_should_mask(
        truncated_in_range_record(8),
        &chunks,
        0,
        expected_reply,
    )
    .await;
}
#[tokio::test]
async fn blackhat_generic_truncated_min_body_99_should_mask_but_leaks() {
    // Body is one byte short of MIN_TLS_CLIENT_HELLO_SIZE, sent as header
    // followed by the truncated body in a second write.
    let body_len = MIN_TLS_CLIENT_HELLO_SIZE - 1;
    let expected = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(
        truncated_in_range_record(body_len),
        &[5, body_len],
        0,
        expected,
    )
    .await;
}
#[tokio::test]
async fn blackhat_generic_fragmented_header_then_close_should_mask_but_leaks() {
    // Header drip-fed one byte per write with no body at all before close.
    let one_byte_chunks = [1usize; 5];
    let expected = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(
        truncated_in_range_record(0),
        &one_byte_chunks,
        0,
        expected,
    )
    .await;
}
#[tokio::test]
async fn blackhat_generic_fragmented_header_plus_partial_body_should_mask_but_leaks() {
    // Header plus a 5-byte partial body, all delivered as ten single-byte writes.
    let drip = [1usize; 10];
    let expected = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(
        truncated_in_range_record(5),
        &drip,
        0,
        expected,
    )
    .await;
}
#[tokio::test]
async fn blackhat_generic_slowloris_fragmented_min_probe_should_mask_but_times_out() {
    // Slowloris variant: six single-byte writes spaced 250 ms apart, so the
    // probe straddles the handler's handshake timeout window.
    let delay_ms = 250;
    let expected = b"HTTP/1.1 404 Not Found\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_generic_fragmented_probe_should_mask(
        truncated_in_range_record(1),
        &[1; 6],
        delay_ms,
        expected,
    )
    .await;
}
#[tokio::test]
async fn blackhat_client_handler_truncated_min_body_1_should_mask_but_leaks() {
    // ClientHandler path: single 6-byte write (header + 1 body byte), no delay.
    let probe = truncated_in_range_record(1);
    let expected = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(probe, &[6], 0, expected).await;
}
#[tokio::test]
async fn blackhat_client_handler_truncated_min_body_8_should_mask_but_leaks() {
    // ClientHandler path: one 13-byte write (header + 8 body bytes), then close.
    let expected_reply = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    let chunks = [13usize];
    run_blackhat_client_handler_fragmented_probe_should_mask(
        truncated_in_range_record(8),
        &chunks,
        0,
        expected_reply,
    )
    .await;
}
#[tokio::test]
async fn blackhat_client_handler_truncated_min_body_99_should_mask_but_leaks() {
    // ClientHandler path: body one byte short of MIN_TLS_CLIENT_HELLO_SIZE,
    // sent as header then truncated body.
    let body_len = MIN_TLS_CLIENT_HELLO_SIZE - 1;
    let expected = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(
        truncated_in_range_record(body_len),
        &[5, body_len],
        0,
        expected,
    )
    .await;
}
#[tokio::test]
async fn blackhat_client_handler_fragmented_header_then_close_should_mask_but_leaks() {
    // ClientHandler path: header drip-fed byte-by-byte, no body before close.
    let one_byte_chunks = [1usize; 5];
    let expected = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(
        truncated_in_range_record(0),
        &one_byte_chunks,
        0,
        expected,
    )
    .await;
}
#[tokio::test]
async fn blackhat_client_handler_fragmented_header_plus_partial_body_should_mask_but_leaks() {
    // ClientHandler path: header plus 5 partial body bytes, ten single-byte writes.
    let drip = [1usize; 10];
    let expected = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(
        truncated_in_range_record(5),
        &drip,
        0,
        expected,
    )
    .await;
}
#[tokio::test]
async fn blackhat_client_handler_slowloris_fragmented_min_probe_should_mask_but_times_out() {
    // ClientHandler slowloris variant: six single-byte writes, 250 ms apart,
    // crossing the handshake timeout while the probe is still incomplete.
    let delay_ms = 250;
    let expected = b"HTTP/1.1 403 Forbidden\r\nContent-Length: 0\r\n\r\n".to_vec();
    run_blackhat_client_handler_fragmented_probe_should_mask(
        truncated_in_range_record(1),
        &[1; 6],
        delay_ms,
        expected,
    )
    .await;
}