mirror of https://github.com/telemt/telemt.git
Refactor TLS fallback tests to remove unnecessary client hello assertions
- Removed assertions for expected client hello messages in multiple TLS fallback tests to streamline the test logic. - Updated the tests to focus on verifying the trailing TLS records received after the fallback. - Enhanced the masking functionality by adding shape hardening features, including dynamic padding based on sent data size. - Modified the relay_to_mask function to accommodate new parameters for shape hardening. - Updated masking security tests to reflect changes in the relay_to_mask function signature.
This commit is contained in:
parent
3abde52de8
commit
0eca535955
|
|
@ -260,6 +260,38 @@ This document lists all configuration keys accepted by `config.toml`.
|
||||||
| tls_full_cert_ttl_secs | `u64` | `90` | — | TTL for sending full cert payload per (domain, client IP) tuple. |
|
| tls_full_cert_ttl_secs | `u64` | `90` | — | TTL for sending full cert payload per (domain, client IP) tuple. |
|
||||||
| alpn_enforce | `bool` | `true` | — | Enforces ALPN echo behavior based on client preference. |
|
| alpn_enforce | `bool` | `true` | — | Enforces ALPN echo behavior based on client preference. |
|
||||||
| mask_proxy_protocol | `u8` | `0` | — | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). |
|
| mask_proxy_protocol | `u8` | `0` | — | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). |
|
||||||
|
| mask_shape_hardening | `bool` | `false` | — | Enables client->mask shape-channel hardening by applying controlled tail padding to bucket boundaries on mask relay shutdown. |
|
||||||
|
| mask_shape_bucket_floor_bytes | `usize` | `512` | Must be `> 0`; should be `<= mask_shape_bucket_cap_bytes`. | Minimum bucket size used by shape-channel hardening. |
|
||||||
|
| mask_shape_bucket_cap_bytes | `usize` | `4096` | Must be `>= mask_shape_bucket_floor_bytes`. | Maximum bucket size used by shape-channel hardening; traffic above cap is not padded further. |
|
||||||
|
|
||||||
|
### Shape-channel hardening notes (`[censorship]`)
|
||||||
|
|
||||||
|
These parameters are designed to reduce one specific fingerprint source during masking: the exact number of bytes sent from proxy to `mask_host` for invalid or probing traffic.
|
||||||
|
|
||||||
|
Without hardening, a censor can often correlate probe input length with backend-observed length very precisely (for example: `5 + body_sent` on early TLS reject paths). That creates a length-based classifier signal.
|
||||||
|
|
||||||
|
When `mask_shape_hardening = true`, Telemt pads the **client->mask** stream tail to a bucket boundary at relay shutdown:
|
||||||
|
|
||||||
|
- Total bytes sent to mask are first measured.
|
||||||
|
- A bucket is selected using powers of two starting from `mask_shape_bucket_floor_bytes`.
|
||||||
|
- Padding is added only if total bytes are below `mask_shape_bucket_cap_bytes`.
|
||||||
|
- If bytes already exceed cap, no extra padding is added.
|
||||||
|
|
||||||
|
This means multiple nearby probe sizes collapse into the same backend-observed size class, making active classification harder.
|
||||||
|
|
||||||
|
Practical trade-offs:
|
||||||
|
|
||||||
|
- Better anti-fingerprinting on size/shape channel.
|
||||||
|
- Slightly higher egress overhead for small probes due to padding.
|
||||||
|
- Behavior is intentionally conservative and disabled by default.
|
||||||
|
|
||||||
|
Recommended starting profile:
|
||||||
|
|
||||||
|
- `mask_shape_hardening = true`
|
||||||
|
- `mask_shape_bucket_floor_bytes = 512`
|
||||||
|
- `mask_shape_bucket_cap_bytes = 4096`
|
||||||
|
|
||||||
|
If your backend or network is very bandwidth-constrained, reduce cap first. If probes are still too distinguishable in your environment, increase floor gradually.
|
||||||
|
|
||||||
## [access]
|
## [access]
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -514,6 +514,18 @@ pub(crate) fn default_alpn_enforce() -> bool {
|
||||||
true
|
true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_mask_shape_hardening() -> bool {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_mask_shape_bucket_floor_bytes() -> usize {
|
||||||
|
512
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_mask_shape_bucket_cap_bytes() -> usize {
|
||||||
|
4096
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_stun_servers() -> Vec<String> {
|
pub(crate) fn default_stun_servers() -> Vec<String> {
|
||||||
vec![
|
vec![
|
||||||
"stun.l.google.com:5349".to_string(),
|
"stun.l.google.com:5349".to_string(),
|
||||||
|
|
|
||||||
|
|
@ -580,6 +580,11 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
||||||
|| old.censorship.tls_full_cert_ttl_secs != new.censorship.tls_full_cert_ttl_secs
|
|| old.censorship.tls_full_cert_ttl_secs != new.censorship.tls_full_cert_ttl_secs
|
||||||
|| old.censorship.alpn_enforce != new.censorship.alpn_enforce
|
|| old.censorship.alpn_enforce != new.censorship.alpn_enforce
|
||||||
|| old.censorship.mask_proxy_protocol != new.censorship.mask_proxy_protocol
|
|| old.censorship.mask_proxy_protocol != new.censorship.mask_proxy_protocol
|
||||||
|
|| old.censorship.mask_shape_hardening != new.censorship.mask_shape_hardening
|
||||||
|
|| old.censorship.mask_shape_bucket_floor_bytes
|
||||||
|
!= new.censorship.mask_shape_bucket_floor_bytes
|
||||||
|
|| old.censorship.mask_shape_bucket_cap_bytes
|
||||||
|
!= new.censorship.mask_shape_bucket_cap_bytes
|
||||||
{
|
{
|
||||||
warned = true;
|
warned = true;
|
||||||
warn!("config reload: censorship settings changed; restart required");
|
warn!("config reload: censorship settings changed; restart required");
|
||||||
|
|
|
||||||
|
|
@ -1394,6 +1394,19 @@ pub struct AntiCensorshipConfig {
|
||||||
/// Allows the backend to see the real client IP.
|
/// Allows the backend to see the real client IP.
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub mask_proxy_protocol: u8,
|
pub mask_proxy_protocol: u8,
|
||||||
|
|
||||||
|
/// Enable shape-channel hardening on mask backend path by padding
|
||||||
|
/// client->mask stream tail to configured buckets on stream end.
|
||||||
|
#[serde(default = "default_mask_shape_hardening")]
|
||||||
|
pub mask_shape_hardening: bool,
|
||||||
|
|
||||||
|
/// Minimum bucket size for mask shape hardening padding.
|
||||||
|
#[serde(default = "default_mask_shape_bucket_floor_bytes")]
|
||||||
|
pub mask_shape_bucket_floor_bytes: usize,
|
||||||
|
|
||||||
|
/// Maximum bucket size for mask shape hardening padding.
|
||||||
|
#[serde(default = "default_mask_shape_bucket_cap_bytes")]
|
||||||
|
pub mask_shape_bucket_cap_bytes: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for AntiCensorshipConfig {
|
impl Default for AntiCensorshipConfig {
|
||||||
|
|
@ -1415,6 +1428,9 @@ impl Default for AntiCensorshipConfig {
|
||||||
tls_full_cert_ttl_secs: default_tls_full_cert_ttl_secs(),
|
tls_full_cert_ttl_secs: default_tls_full_cert_ttl_secs(),
|
||||||
alpn_enforce: default_alpn_enforce(),
|
alpn_enforce: default_alpn_enforce(),
|
||||||
mask_proxy_protocol: 0,
|
mask_proxy_protocol: 0,
|
||||||
|
mask_shape_hardening: default_mask_shape_hardening(),
|
||||||
|
mask_shape_bucket_floor_bytes: default_mask_shape_bucket_floor_bytes(),
|
||||||
|
mask_shape_bucket_cap_bytes: default_mask_shape_bucket_cap_bytes(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -8,6 +8,7 @@ use std::sync::OnceLock;
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use ipnetwork::IpNetwork;
|
use ipnetwork::IpNetwork;
|
||||||
|
use rand::Rng;
|
||||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
|
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite};
|
||||||
use tokio::net::TcpStream;
|
use tokio::net::TcpStream;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
|
|
@ -20,8 +21,8 @@ type PostHandshakeFuture = Pin<Box<dyn Future<Output = Result<()>> + Send>>;
|
||||||
enum HandshakeOutcome {
|
enum HandshakeOutcome {
|
||||||
/// Handshake succeeded, relay work to do (outside timeout)
|
/// Handshake succeeded, relay work to do (outside timeout)
|
||||||
NeedsRelay(PostHandshakeFuture),
|
NeedsRelay(PostHandshakeFuture),
|
||||||
/// Already fully handled (bad client masking, etc.)
|
/// Handshake failed and masking must run outside handshake timeout budget
|
||||||
Handled,
|
NeedsMasking(PostHandshakeFuture),
|
||||||
}
|
}
|
||||||
|
|
||||||
#[must_use = "UserConnectionReservation must be kept alive to retain user/IP reservation until release or drop"]
|
#[must_use = "UserConnectionReservation must be kept alive to retain user/IP reservation until release or drop"]
|
||||||
|
|
@ -130,6 +131,24 @@ async fn read_with_progress<R: AsyncRead + Unpin>(reader: &mut R, mut buf: &mut
|
||||||
Ok(total)
|
Ok(total)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn maybe_apply_mask_reject_delay(config: &ProxyConfig) {
|
||||||
|
let min = config.censorship.server_hello_delay_min_ms;
|
||||||
|
let max = config.censorship.server_hello_delay_max_ms;
|
||||||
|
if max == 0 {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let delay_ms = if min >= max {
|
||||||
|
max
|
||||||
|
} else {
|
||||||
|
rand::rng().random_range(min..=max)
|
||||||
|
};
|
||||||
|
|
||||||
|
if delay_ms > 0 {
|
||||||
|
tokio::time::sleep(Duration::from_millis(delay_ms)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fn handshake_timeout_with_mask_grace(config: &ProxyConfig) -> Duration {
|
fn handshake_timeout_with_mask_grace(config: &ProxyConfig) -> Duration {
|
||||||
let base = Duration::from_secs(config.timeouts.client_handshake);
|
let base = Duration::from_secs(config.timeouts.client_handshake);
|
||||||
if config.censorship.mask {
|
if config.censorship.mask {
|
||||||
|
|
@ -139,6 +158,34 @@ fn handshake_timeout_with_mask_grace(config: &ProxyConfig) -> Duration {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn masking_outcome<R, W>(
|
||||||
|
reader: R,
|
||||||
|
writer: W,
|
||||||
|
initial_data: Vec<u8>,
|
||||||
|
peer: SocketAddr,
|
||||||
|
local_addr: SocketAddr,
|
||||||
|
config: Arc<ProxyConfig>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
) -> HandshakeOutcome
|
||||||
|
where
|
||||||
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
|
W: AsyncWrite + Unpin + Send + 'static,
|
||||||
|
{
|
||||||
|
HandshakeOutcome::NeedsMasking(Box::pin(async move {
|
||||||
|
handle_bad_client(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
&initial_data,
|
||||||
|
peer,
|
||||||
|
local_addr,
|
||||||
|
&config,
|
||||||
|
&beobachten,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
Ok(())
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
fn record_beobachten_class(
|
fn record_beobachten_class(
|
||||||
beobachten: &BeobachtenStore,
|
beobachten: &BeobachtenStore,
|
||||||
config: &ProxyConfig,
|
config: &ProxyConfig,
|
||||||
|
|
@ -283,18 +330,17 @@ where
|
||||||
if !tls_clienthello_len_in_bounds(tls_len) {
|
if !tls_clienthello_len_in_bounds(tls_len) {
|
||||||
debug!(peer = %real_peer, tls_len = tls_len, max_tls_len = MAX_TLS_PLAINTEXT_SIZE, "TLS handshake length out of bounds");
|
debug!(peer = %real_peer, tls_len = tls_len, max_tls_len = MAX_TLS_PLAINTEXT_SIZE, "TLS handshake length out of bounds");
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
|
maybe_apply_mask_reject_delay(&config).await;
|
||||||
let (reader, writer) = tokio::io::split(stream);
|
let (reader, writer) = tokio::io::split(stream);
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&first_bytes,
|
first_bytes.to_vec(),
|
||||||
real_peer,
|
real_peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&beobachten,
|
beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut handshake = vec![0u8; 5 + tls_len];
|
let mut handshake = vec![0u8; 5 + tls_len];
|
||||||
|
|
@ -304,38 +350,36 @@ where
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
debug!(peer = %real_peer, error = %e, tls_len = tls_len, "TLS ClientHello body read failed; engaging masking fallback");
|
debug!(peer = %real_peer, error = %e, tls_len = tls_len, "TLS ClientHello body read failed; engaging masking fallback");
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
|
maybe_apply_mask_reject_delay(&config).await;
|
||||||
let initial_len = 5;
|
let initial_len = 5;
|
||||||
let (reader, writer) = tokio::io::split(stream);
|
let (reader, writer) = tokio::io::split(stream);
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake[..initial_len],
|
handshake[..initial_len].to_vec(),
|
||||||
real_peer,
|
real_peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&beobachten,
|
beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
if body_read < tls_len {
|
if body_read < tls_len {
|
||||||
debug!(peer = %real_peer, got = body_read, expected = tls_len, "Truncated in-range TLS ClientHello; engaging masking fallback");
|
debug!(peer = %real_peer, got = body_read, expected = tls_len, "Truncated in-range TLS ClientHello; engaging masking fallback");
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
|
maybe_apply_mask_reject_delay(&config).await;
|
||||||
let initial_len = 5 + body_read;
|
let initial_len = 5 + body_read;
|
||||||
let (reader, writer) = tokio::io::split(stream);
|
let (reader, writer) = tokio::io::split(stream);
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake[..initial_len],
|
handshake[..initial_len].to_vec(),
|
||||||
real_peer,
|
real_peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&beobachten,
|
beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let (read_half, write_half) = tokio::io::split(stream);
|
let (read_half, write_half) = tokio::io::split(stream);
|
||||||
|
|
@ -347,17 +391,15 @@ where
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake,
|
handshake.clone(),
|
||||||
real_peer,
|
real_peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&beobachten,
|
beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
};
|
};
|
||||||
|
|
@ -389,17 +431,15 @@ where
|
||||||
peer = %peer,
|
peer = %peer,
|
||||||
"Authenticated TLS session failed MTProto validation; engaging masking fallback"
|
"Authenticated TLS session failed MTProto validation; engaging masking fallback"
|
||||||
);
|
);
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake,
|
Vec::new(),
|
||||||
real_peer,
|
real_peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&beobachten,
|
beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
};
|
};
|
||||||
|
|
@ -416,18 +456,17 @@ where
|
||||||
if !config.general.modes.classic && !config.general.modes.secure {
|
if !config.general.modes.classic && !config.general.modes.secure {
|
||||||
debug!(peer = %real_peer, "Non-TLS modes disabled");
|
debug!(peer = %real_peer, "Non-TLS modes disabled");
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
|
maybe_apply_mask_reject_delay(&config).await;
|
||||||
let (reader, writer) = tokio::io::split(stream);
|
let (reader, writer) = tokio::io::split(stream);
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&first_bytes,
|
first_bytes.to_vec(),
|
||||||
real_peer,
|
real_peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&beobachten,
|
beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut handshake = [0u8; HANDSHAKE_LEN];
|
let mut handshake = [0u8; HANDSHAKE_LEN];
|
||||||
|
|
@ -443,17 +482,15 @@ where
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake,
|
handshake.to_vec(),
|
||||||
real_peer,
|
real_peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&beobachten,
|
beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
};
|
};
|
||||||
|
|
@ -503,8 +540,7 @@ where
|
||||||
|
|
||||||
// Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
|
// Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
|
||||||
match outcome {
|
match outcome {
|
||||||
HandshakeOutcome::NeedsRelay(fut) => fut.await,
|
HandshakeOutcome::NeedsRelay(fut) | HandshakeOutcome::NeedsMasking(fut) => fut.await,
|
||||||
HandshakeOutcome::Handled => Ok(()),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -617,8 +653,7 @@ impl RunningClientHandler {
|
||||||
|
|
||||||
// Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
|
// Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
|
||||||
match outcome {
|
match outcome {
|
||||||
HandshakeOutcome::NeedsRelay(fut) => fut.await,
|
HandshakeOutcome::NeedsRelay(fut) | HandshakeOutcome::NeedsMasking(fut) => fut.await,
|
||||||
HandshakeOutcome::Handled => Ok(()),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -731,18 +766,17 @@ impl RunningClientHandler {
|
||||||
if !tls_clienthello_len_in_bounds(tls_len) {
|
if !tls_clienthello_len_in_bounds(tls_len) {
|
||||||
debug!(peer = %peer, tls_len = tls_len, max_tls_len = MAX_TLS_PLAINTEXT_SIZE, "TLS handshake length out of bounds");
|
debug!(peer = %peer, tls_len = tls_len, max_tls_len = MAX_TLS_PLAINTEXT_SIZE, "TLS handshake length out of bounds");
|
||||||
self.stats.increment_connects_bad();
|
self.stats.increment_connects_bad();
|
||||||
|
maybe_apply_mask_reject_delay(&self.config).await;
|
||||||
let (reader, writer) = self.stream.into_split();
|
let (reader, writer) = self.stream.into_split();
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&first_bytes,
|
first_bytes.to_vec(),
|
||||||
peer,
|
peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&self.config,
|
self.config.clone(),
|
||||||
&self.beobachten,
|
self.beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut handshake = vec![0u8; 5 + tls_len];
|
let mut handshake = vec![0u8; 5 + tls_len];
|
||||||
|
|
@ -752,37 +786,35 @@ impl RunningClientHandler {
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
debug!(peer = %peer, error = %e, tls_len = tls_len, "TLS ClientHello body read failed; engaging masking fallback");
|
debug!(peer = %peer, error = %e, tls_len = tls_len, "TLS ClientHello body read failed; engaging masking fallback");
|
||||||
self.stats.increment_connects_bad();
|
self.stats.increment_connects_bad();
|
||||||
|
maybe_apply_mask_reject_delay(&self.config).await;
|
||||||
let (reader, writer) = self.stream.into_split();
|
let (reader, writer) = self.stream.into_split();
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake[..5],
|
handshake[..5].to_vec(),
|
||||||
peer,
|
peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&self.config,
|
self.config.clone(),
|
||||||
&self.beobachten,
|
self.beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
if body_read < tls_len {
|
if body_read < tls_len {
|
||||||
debug!(peer = %peer, got = body_read, expected = tls_len, "Truncated in-range TLS ClientHello; engaging masking fallback");
|
debug!(peer = %peer, got = body_read, expected = tls_len, "Truncated in-range TLS ClientHello; engaging masking fallback");
|
||||||
self.stats.increment_connects_bad();
|
self.stats.increment_connects_bad();
|
||||||
|
maybe_apply_mask_reject_delay(&self.config).await;
|
||||||
let initial_len = 5 + body_read;
|
let initial_len = 5 + body_read;
|
||||||
let (reader, writer) = self.stream.into_split();
|
let (reader, writer) = self.stream.into_split();
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake[..initial_len],
|
handshake[..initial_len].to_vec(),
|
||||||
peer,
|
peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&self.config,
|
self.config.clone(),
|
||||||
&self.beobachten,
|
self.beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let config = self.config.clone();
|
let config = self.config.clone();
|
||||||
|
|
@ -807,17 +839,15 @@ impl RunningClientHandler {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake,
|
handshake.clone(),
|
||||||
peer,
|
peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&self.beobachten,
|
self.beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
};
|
};
|
||||||
|
|
@ -858,17 +888,15 @@ impl RunningClientHandler {
|
||||||
peer = %peer,
|
peer = %peer,
|
||||||
"Authenticated TLS session failed MTProto validation; engaging masking fallback"
|
"Authenticated TLS session failed MTProto validation; engaging masking fallback"
|
||||||
);
|
);
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake,
|
Vec::new(),
|
||||||
peer,
|
peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&self.beobachten,
|
self.beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
};
|
};
|
||||||
|
|
@ -898,18 +926,17 @@ impl RunningClientHandler {
|
||||||
if !self.config.general.modes.classic && !self.config.general.modes.secure {
|
if !self.config.general.modes.classic && !self.config.general.modes.secure {
|
||||||
debug!(peer = %peer, "Non-TLS modes disabled");
|
debug!(peer = %peer, "Non-TLS modes disabled");
|
||||||
self.stats.increment_connects_bad();
|
self.stats.increment_connects_bad();
|
||||||
|
maybe_apply_mask_reject_delay(&self.config).await;
|
||||||
let (reader, writer) = self.stream.into_split();
|
let (reader, writer) = self.stream.into_split();
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&first_bytes,
|
first_bytes.to_vec(),
|
||||||
peer,
|
peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&self.config,
|
self.config.clone(),
|
||||||
&self.beobachten,
|
self.beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut handshake = [0u8; HANDSHAKE_LEN];
|
let mut handshake = [0u8; HANDSHAKE_LEN];
|
||||||
|
|
@ -938,17 +965,15 @@ impl RunningClientHandler {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
handle_bad_client(
|
return Ok(masking_outcome(
|
||||||
reader,
|
reader,
|
||||||
writer,
|
writer,
|
||||||
&handshake,
|
handshake.to_vec(),
|
||||||
peer,
|
peer,
|
||||||
local_addr,
|
local_addr,
|
||||||
&config,
|
config.clone(),
|
||||||
&self.beobachten,
|
self.beobachten.clone(),
|
||||||
)
|
));
|
||||||
.await;
|
|
||||||
return Ok(HandshakeOutcome::Handled);
|
|
||||||
}
|
}
|
||||||
HandshakeResult::Error(e) => return Err(e),
|
HandshakeResult::Error(e) => return Err(e),
|
||||||
};
|
};
|
||||||
|
|
@ -1208,3 +1233,31 @@ mod tls_clienthello_truncation_adversarial_tests;
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
#[path = "client_timing_profile_adversarial_tests.rs"]
|
#[path = "client_timing_profile_adversarial_tests.rs"]
|
||||||
mod timing_profile_adversarial_tests;
|
mod timing_profile_adversarial_tests;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
#[path = "client_masking_budget_security_tests.rs"]
|
||||||
|
mod masking_budget_security_tests;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
#[path = "client_masking_redteam_expected_fail_tests.rs"]
|
||||||
|
mod masking_redteam_expected_fail_tests;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
#[path = "client_masking_hard_adversarial_tests.rs"]
|
||||||
|
mod masking_hard_adversarial_tests;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
#[path = "client_masking_stress_adversarial_tests.rs"]
|
||||||
|
mod masking_stress_adversarial_tests;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
#[path = "client_masking_blackhat_campaign_tests.rs"]
|
||||||
|
mod masking_blackhat_campaign_tests;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
#[path = "client_masking_diagnostics_security_tests.rs"]
|
||||||
|
mod masking_diagnostics_security_tests;
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
#[path = "client_masking_shape_hardening_security_tests.rs"]
|
||||||
|
mod masking_shape_hardening_security_tests;
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,893 @@
|
||||||
|
use super::*;
|
||||||
|
use crate::config::{UpstreamConfig, UpstreamType};
|
||||||
|
use crate::crypto::sha256_hmac;
|
||||||
|
use crate::protocol::constants::{
|
||||||
|
HANDSHAKE_LEN,
|
||||||
|
MAX_TLS_PLAINTEXT_SIZE,
|
||||||
|
MIN_TLS_CLIENT_HELLO_SIZE,
|
||||||
|
TLS_RECORD_APPLICATION,
|
||||||
|
TLS_VERSION,
|
||||||
|
};
|
||||||
|
use crate::protocol::tls;
|
||||||
|
use std::collections::HashSet;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::time::{Duration, Instant};
|
||||||
|
|
||||||
|
struct CampaignHarness {
|
||||||
|
config: Arc<ProxyConfig>,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
replay_checker: Arc<ReplayChecker>,
|
||||||
|
buffer_pool: Arc<BufferPool>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||||
|
Arc::new(UpstreamManager::new(
|
||||||
|
vec![UpstreamConfig {
|
||||||
|
upstream_type: UpstreamType::Direct {
|
||||||
|
interface: None,
|
||||||
|
bind_addresses: None,
|
||||||
|
},
|
||||||
|
weight: 1,
|
||||||
|
enabled: true,
|
||||||
|
scopes: String::new(),
|
||||||
|
selected_scope: String::new(),
|
||||||
|
}],
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
false,
|
||||||
|
stats,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_mask_harness(secret_hex: &str, mask_port: u16) -> CampaignHarness {
|
||||||
|
let mut cfg = ProxyConfig::default();
|
||||||
|
cfg.general.beobachten = false;
|
||||||
|
cfg.censorship.mask = true;
|
||||||
|
cfg.censorship.mask_unix_sock = None;
|
||||||
|
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||||
|
cfg.censorship.mask_port = mask_port;
|
||||||
|
cfg.censorship.mask_proxy_protocol = 0;
|
||||||
|
cfg.access.ignore_time_skew = true;
|
||||||
|
cfg.access
|
||||||
|
.users
|
||||||
|
.insert("user".to_string(), secret_hex.to_string());
|
||||||
|
|
||||||
|
let config = Arc::new(cfg);
|
||||||
|
let stats = Arc::new(Stats::new());
|
||||||
|
|
||||||
|
CampaignHarness {
|
||||||
|
config,
|
||||||
|
stats: stats.clone(),
|
||||||
|
upstream_manager: new_upstream_manager(stats),
|
||||||
|
replay_checker: Arc::new(ReplayChecker::new(1024, Duration::from_secs(60))),
|
||||||
|
buffer_pool: Arc::new(BufferPool::new()),
|
||||||
|
rng: Arc::new(SecureRandom::new()),
|
||||||
|
route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||||
|
ip_tracker: Arc::new(UserIpTracker::new()),
|
||||||
|
beobachten: Arc::new(BeobachtenStore::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
|
||||||
|
assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");
|
||||||
|
|
||||||
|
let total_len = 5 + tls_len;
|
||||||
|
let mut handshake = vec![fill; total_len];
|
||||||
|
|
||||||
|
handshake[0] = 0x16;
|
||||||
|
handshake[1] = 0x03;
|
||||||
|
handshake[2] = 0x01;
|
||||||
|
handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
|
||||||
|
|
||||||
|
let session_id_len: usize = 32;
|
||||||
|
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||||
|
|
||||||
|
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||||
|
let computed = sha256_hmac(secret, &handshake);
|
||||||
|
let mut digest = computed;
|
||||||
|
let ts = timestamp.to_le_bytes();
|
||||||
|
for i in 0..4 {
|
||||||
|
digest[28 + i] ^= ts[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||||
|
.copy_from_slice(&digest);
|
||||||
|
handshake
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wrap_tls_record(record_type: u8, payload: &[u8]) -> Vec<u8> {
|
||||||
|
let mut record = Vec::with_capacity(5 + payload.len());
|
||||||
|
record.push(record_type);
|
||||||
|
record.extend_from_slice(&TLS_VERSION);
|
||||||
|
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
|
||||||
|
record.extend_from_slice(payload);
|
||||||
|
record
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convenience wrapper: frames `payload` as a TLS ApplicationData record.
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
    wrap_tls_record(TLS_RECORD_APPLICATION, payload)
}
|
||||||
|
|
||||||
|
async fn read_and_discard_tls_record_body<T>(stream: &mut T, header: [u8; 5])
|
||||||
|
where
|
||||||
|
T: tokio::io::AsyncRead + Unpin,
|
||||||
|
{
|
||||||
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
|
let mut body = vec![0u8; len];
|
||||||
|
stream.read_exact(&mut body).await.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Drives one "TLS handshake succeeds, MTProto check fails" session and
/// verifies the proxy falls back to relaying traffic to the mask backend.
///
/// Flow: a local TCP listener plays the mask backend; the harness config is
/// repointed at it. The client sends `client_hello` (valid fake-TLS), reads
/// and discards the ServerHello, then sends `bad_mtproto_record` (framing OK,
/// MTProto invalid) followed by `trailing_records`. The backend must receive
/// exactly `expected_forward`, and the handler must finish without error.
async fn run_tls_success_mtproto_fail_capture(
    harness: CampaignHarness,
    peer: SocketAddr,
    client_hello: Vec<u8>,
    bad_mtproto_record: Vec<u8>,
    trailing_records: Vec<Vec<u8>>,
    expected_forward: Vec<u8>,
) {
    // Stand-in mask backend on an ephemeral port.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    // Repoint the harness config's mask backend at our listener.
    let mut cfg = (*harness.config).clone();
    cfg.censorship.mask_port = backend_addr.port();
    let cfg = Arc::new(cfg);

    // Backend side: capture exactly the expected number of forwarded bytes.
    let expected = expected_forward.clone();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected.len()];
        stream.read_exact(&mut got).await.unwrap();
        got
    });

    // In-memory duplex pipe stands in for the client TCP connection.
    let (server_side, mut client_side) = duplex(262144);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        cfg,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    client_side.write_all(&client_hello).await.unwrap();

    // A valid fake-TLS hello elicits a ServerHello (handshake record, 0x16);
    // consume it so the subsequent writes are seen as post-handshake traffic.
    let mut tls_response_head = [0u8; 5];
    client_side.read_exact(&mut tls_response_head).await.unwrap();
    assert_eq!(tls_response_head[0], 0x16);
    read_and_discard_tls_record_body(&mut client_side, tls_response_head).await;

    // Trigger the MTProto failure, then send the records that must survive
    // the fallback in wire order.
    client_side.write_all(&bad_mtproto_record).await.unwrap();
    for record in trailing_records {
        client_side.write_all(&record).await.unwrap();
    }

    let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, expected_forward);

    // Closing the client side must let the handler wind down cleanly.
    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
}
|
||||||
|
|
||||||
|
/// Sends an invalid TLS probe (`payload`) to the handler and asserts that the
/// mask backend receives exactly `expected` — i.e. the proxy masks the probe
/// by forwarding the bytes it saw, with no ServerHello of its own.
///
/// The supplied `config` is cloned and forced into mask mode, pointed at a
/// local TCP listener that plays the backend.
async fn run_invalid_tls_capture(config: Arc<ProxyConfig>, payload: Vec<u8>, expected: Vec<u8>) {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    // Enable TCP masking toward our stand-in backend.
    let mut cfg = (*config).clone();
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    let cfg = Arc::new(cfg);

    // Backend side: read exactly the expected number of masked bytes.
    let expected_probe = expected.clone();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_probe.len()];
        stream.read_exact(&mut got).await.unwrap();
        got
    });

    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.77:45001".parse().unwrap(),
        cfg,
        stats,
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Write the probe and half-close; the handler should still flush
    // everything it saw to the backend.
    client_side.write_all(&payload).await.unwrap();
    client_side.shutdown().await.unwrap();

    let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, expected);

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
}
|
||||||
|
|
||||||
|
/// Campaign 01: after a valid fake-TLS handshake whose MTProto payload fails,
/// a single trailing ApplicationData record must reach the mask backend
/// byte-for-byte.
#[tokio::test]
async fn blackhat_campaign_01_tail_only_record_is_forwarded_after_tls_success_mtproto_fail() {
    // Raw secret bytes mirror the hex secret passed to the harness builder.
    let secret = [0xA1u8; 16];
    let harness = build_mask_harness("a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 11, 600, 0x41);
    // All-zero HANDSHAKE_LEN body: framing is valid, MTProto content is not.
    let bad_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let tail = wrap_tls_application_data(b"blackhat-tail-01");

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.1:55001".parse().unwrap(),
        client_hello,
        bad_record,
        vec![tail.clone()],
        tail,
    )
    .await;
}
|
||||||
|
|
||||||
|
/// Campaign 02: two trailing records written after the MTProto failure must
/// arrive at the mask backend concatenated in their original wire order.
#[tokio::test]
async fn blackhat_campaign_02_two_ordered_records_preserved_after_fallback() {
    let secret = [0xA2u8; 16];
    let harness = build_mask_harness("a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 12, 600, 0x42);
    let bad_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let r1 = wrap_tls_application_data(b"first");
    let r2 = wrap_tls_application_data(b"second");
    // The backend must observe r1 then r2 with no reordering or loss.
    let expected = [r1.clone(), r2.clone()].concat();

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.2:55002".parse().unwrap(),
        client_hello,
        bad_record,
        vec![r1, r2],
        expected,
    )
    .await;
}
|
||||||
|
|
||||||
|
/// Campaign 03: a maximum-size (MAX_TLS_PLAINTEXT_SIZE payload)
/// ApplicationData record must survive the fallback path intact.
#[tokio::test]
async fn blackhat_campaign_03_large_tls_application_record_survives_fallback() {
    let secret = [0xA3u8; 16];
    let harness = build_mask_harness("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 13, 600, 0x43);
    let bad_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    // Largest legal TLS plaintext; exercises buffering on the relay path.
    let big_payload = vec![0x5Au8; MAX_TLS_PLAINTEXT_SIZE];
    let big_record = wrap_tls_application_data(&big_payload);

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.3:55003".parse().unwrap(),
        client_hello,
        bad_record,
        vec![big_record.clone()],
        big_record,
    )
    .await;
}
|
||||||
|
|
||||||
|
/// Campaign 04: extra bytes coalesced into the *failing* MTProto record
/// (beyond HANDSHAKE_LEN) must be re-framed as their own ApplicationData
/// record and forwarded to the mask backend.
#[tokio::test]
async fn blackhat_campaign_04_coalesced_tail_in_failed_record_is_reframed_and_forwarded() {
    let secret = [0xA4u8; 16];
    let harness = build_mask_harness("a4a4a4a4a4a4a4a4a4a4a4a4a4a4a4a4", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 14, 600, 0x44);

    // Bad record payload = invalid handshake bytes + a tail glued on behind.
    let coalesced_tail = b"coalesced-tail-blackhat".to_vec();
    let mut bad_payload = vec![0u8; HANDSHAKE_LEN];
    bad_payload.extend_from_slice(&coalesced_tail);
    let bad_record = wrap_tls_application_data(&bad_payload);
    // Only the tail, re-wrapped, should reach the backend.
    let expected = wrap_tls_application_data(&coalesced_tail);

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.4:55004".parse().unwrap(),
        client_hello,
        bad_record,
        Vec::new(),
        expected,
    )
    .await;
}
|
||||||
|
|
||||||
|
/// Campaign 05: a tail coalesced into the failing record plus a separately
/// written follow-up record must both be forwarded, preserving wire order
/// (re-framed tail first, then the next record).
#[tokio::test]
async fn blackhat_campaign_05_coalesced_tail_plus_next_record_keep_wire_order() {
    let secret = [0xA5u8; 16];
    let harness = build_mask_harness("a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 15, 600, 0x45);

    // Failing record carries invalid handshake bytes plus an inline tail.
    let coalesced_tail = b"inline-tail".to_vec();
    let mut bad_payload = vec![0u8; HANDSHAKE_LEN];
    bad_payload.extend_from_slice(&coalesced_tail);
    let bad_record = wrap_tls_application_data(&bad_payload);
    let next_record = wrap_tls_application_data(b"next-record");

    // Backend must see the re-framed tail first, then the next record.
    let expected = [
        wrap_tls_application_data(&coalesced_tail),
        next_record.clone(),
    ]
    .concat();

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.5:55005".parse().unwrap(),
        client_hello,
        bad_record,
        vec![next_record],
        expected,
    )
    .await;
}
|
||||||
|
|
||||||
|
/// Campaign 06: the same valid ClientHello sent twice through a shared
/// ReplayChecker. First session: handshake succeeds, MTProto fails, the tail
/// is relayed to the backend. Second (replayed) session: the proxy must NOT
/// answer with a ServerHello and must instead mask the replayed hello
/// straight to the backend.
#[tokio::test]
async fn blackhat_campaign_06_replayed_tls_hello_is_masked_without_serverhello() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let harness = build_mask_harness("a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6", backend_addr.port());
    // Shared across both sessions so the second hello registers as a replay.
    let replay_checker = harness.replay_checker.clone();
    let client_hello = make_valid_tls_client_hello(&[0xA6; 16], 16, 600, 0x46);
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let first_tail = wrap_tls_application_data(b"seed-tail");

    let expected_hello = client_hello.clone();
    let expected_tail = first_tail.clone();

    // Backend side: first connection carries the seed tail; second carries
    // the replayed (masked) ClientHello itself.
    let accept_task = tokio::spawn(async move {
        let (mut s1, _) = listener.accept().await.unwrap();
        let mut got_tail = vec![0u8; expected_tail.len()];
        s1.read_exact(&mut got_tail).await.unwrap();
        assert_eq!(got_tail, expected_tail);
        drop(s1);

        let (mut s2, _) = listener.accept().await.unwrap();
        let mut got_hello = vec![0u8; expected_hello.len()];
        s2.read_exact(&mut got_hello).await.unwrap();
        got_hello
    });

    // Factory for one client session; `send_mtproto` selects the first
    // (handshake succeeds) vs. second (replay, no ServerHello) behavior.
    let run_one = |checker: Arc<ReplayChecker>, send_mtproto: bool| {
        let mut cfg = (*harness.config).clone();
        cfg.censorship.mask_port = backend_addr.port();
        let cfg = Arc::new(cfg);
        let hello = client_hello.clone();
        let invalid_mtproto_record = invalid_mtproto_record.clone();
        let first_tail = first_tail.clone();
        let stats = harness.stats.clone();
        let upstream = harness.upstream_manager.clone();
        let pool = harness.buffer_pool.clone();
        let rng = harness.rng.clone();
        let route = harness.route_runtime.clone();
        let ipt = harness.ip_tracker.clone();
        let beob = harness.beobachten.clone();

        async move {
            let (server_side, mut client_side) = duplex(131072);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                "198.51.100.6:55006".parse().unwrap(),
                cfg,
                stats,
                upstream,
                checker,
                pool,
                rng,
                None,
                route,
                None,
                ipt,
                beob,
                false,
            ));

            client_side.write_all(&hello).await.unwrap();
            if send_mtproto {
                // Fresh hello: expect a ServerHello, then fail MTProto and
                // send the tail that the backend must receive.
                let mut head = [0u8; 5];
                client_side.read_exact(&mut head).await.unwrap();
                assert_eq!(head[0], 0x16);
                read_and_discard_tls_record_body(&mut client_side, head).await;
                client_side.write_all(&invalid_mtproto_record).await.unwrap();
                client_side.write_all(&first_tail).await.unwrap();
            } else {
                // Replayed hello: within 300ms there must be no response byte
                // (either timeout fires, or the stream is already closed).
                let mut one = [0u8; 1];
                let no_server_hello = tokio::time::timeout(
                    Duration::from_millis(300),
                    client_side.read_exact(&mut one),
                )
                .await;
                assert!(no_server_hello.is_err() || no_server_hello.unwrap().is_err());
            }
            client_side.shutdown().await.unwrap();
            let result = tokio::time::timeout(Duration::from_secs(4), handler)
                .await
                .unwrap()
                .unwrap();
            assert!(result.is_ok());
        }
    };

    run_one(replay_checker.clone(), true).await;
    run_one(replay_checker, false).await;

    // The replayed session must have masked the hello verbatim.
    let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, client_hello);
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn blackhat_campaign_07_truncated_clienthello_exact_prefix_is_forwarded() {
|
||||||
|
let mut payload = vec![0u8; 5 + 37];
|
||||||
|
payload[0] = 0x16;
|
||||||
|
payload[1] = 0x03;
|
||||||
|
payload[2] = 0x01;
|
||||||
|
payload[3..5].copy_from_slice(&600u16.to_be_bytes());
|
||||||
|
payload[5..].fill(0x71);
|
||||||
|
|
||||||
|
run_invalid_tls_capture(Arc::new(ProxyConfig::default()), payload.clone(), payload).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn blackhat_campaign_08_out_of_bounds_len_forwards_header_only() {
|
||||||
|
let header = vec![0x16, 0x03, 0x01, 0xFF, 0xFF];
|
||||||
|
run_invalid_tls_capture(Arc::new(ProxyConfig::default()), header.clone(), header).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn blackhat_campaign_09_fragmented_header_then_partial_body_masks_seen_bytes_only() {
|
||||||
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
|
let backend_addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
|
let mut cfg = ProxyConfig::default();
|
||||||
|
cfg.censorship.mask = true;
|
||||||
|
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||||
|
cfg.censorship.mask_port = backend_addr.port();
|
||||||
|
cfg.censorship.mask_unix_sock = None;
|
||||||
|
|
||||||
|
let expected = {
|
||||||
|
let mut x = vec![0u8; 5 + 11];
|
||||||
|
x[0] = 0x16;
|
||||||
|
x[1] = 0x03;
|
||||||
|
x[2] = 0x01;
|
||||||
|
x[3..5].copy_from_slice(&600u16.to_be_bytes());
|
||||||
|
x[5..].fill(0xCC);
|
||||||
|
x
|
||||||
|
};
|
||||||
|
|
||||||
|
let accept_task = tokio::spawn(async move {
|
||||||
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
let mut got = vec![0u8; expected.len()];
|
||||||
|
stream.read_exact(&mut got).await.unwrap();
|
||||||
|
got
|
||||||
|
});
|
||||||
|
|
||||||
|
let (server_side, mut client_side) = duplex(65536);
|
||||||
|
let handler = tokio::spawn(handle_client_stream(
|
||||||
|
server_side,
|
||||||
|
"198.51.100.9:55009".parse().unwrap(),
|
||||||
|
Arc::new(cfg),
|
||||||
|
Arc::new(Stats::new()),
|
||||||
|
new_upstream_manager(Arc::new(Stats::new())),
|
||||||
|
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||||
|
Arc::new(BufferPool::new()),
|
||||||
|
Arc::new(SecureRandom::new()),
|
||||||
|
None,
|
||||||
|
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||||
|
None,
|
||||||
|
Arc::new(UserIpTracker::new()),
|
||||||
|
Arc::new(BeobachtenStore::new()),
|
||||||
|
false,
|
||||||
|
));
|
||||||
|
|
||||||
|
client_side.write_all(&[0x16, 0x03]).await.unwrap();
|
||||||
|
client_side.write_all(&[0x01, 0x02, 0x58]).await.unwrap();
|
||||||
|
client_side.write_all(&vec![0xCC; 11]).await.unwrap();
|
||||||
|
client_side.shutdown().await.unwrap();
|
||||||
|
|
||||||
|
let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(got.len(), 16);
|
||||||
|
|
||||||
|
let result = tokio::time::timeout(Duration::from_secs(4), handler)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert!(result.is_ok());
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Campaign 10: with `client_handshake = 0` and a forced 700ms ServerHello
/// reject delay, an invalid probe must still complete via the mask path
/// without incrementing the handshake-timeout counter, and the delay must
/// actually be observed.
#[tokio::test]
async fn blackhat_campaign_10_zero_handshake_timeout_with_delay_still_avoids_timeout_counter() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    // Port 1: nothing listens there, so the backend connect fails too.
    cfg.censorship.mask_port = 1;
    cfg.timeouts.client_handshake = 0;
    // Pin the reject delay to exactly 700ms (min == max).
    cfg.censorship.server_hello_delay_min_ms = 700;
    cfg.censorship.server_hello_delay_max_ms = 700;

    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(4096);
    let started = Instant::now();

    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.10:55010".parse().unwrap(),
        Arc::new(cfg),
        stats.clone(),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Well-formed header, garbage 700-byte body — fails TLS validation.
    let mut invalid = vec![0u8; 5 + 700];
    invalid[0] = 0x16;
    invalid[1] = 0x03;
    invalid[2] = 0x01;
    invalid[3..5].copy_from_slice(&700u16.to_be_bytes());
    invalid[5..].fill(0x66);

    client_side.write_all(&invalid).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    // A zero handshake budget must not be counted as a timeout.
    assert_eq!(stats.get_handshake_timeouts(), 0);
    // 650ms lower bound leaves slack below the configured 700ms delay.
    assert!(started.elapsed() >= Duration::from_millis(650));
}
|
||||||
|
|
||||||
|
/// Campaign 11: 24 concurrent invalid probes (each with a unique 5-byte
/// header) must all be masked to the backend — the set of headers seen by
/// the backend equals the set sent — with zero handshake timeouts recorded.
#[tokio::test]
async fn blackhat_campaign_11_parallel_bad_tls_probes_all_masked_without_timeouts() {
    let n = 24usize;
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_port = backend_addr.port();

    let stats = Arc::new(Stats::new());
    // Backend side: collect the header of each of the n masked connections.
    // Connections may land in any order, hence a set rather than a list.
    let accept_task = tokio::spawn(async move {
        let mut seen = HashSet::new();
        for _ in 0..n {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut hdr = [0u8; 5];
            stream.read_exact(&mut hdr).await.unwrap();
            seen.insert(hdr.to_vec());
        }
        seen
    });

    let mut tasks = Vec::new();
    for i in 0..n {
        // Out-of-bounds length 0xFFxx; last byte makes each probe unique.
        let mut hdr = [0u8; 5];
        hdr[0] = 0x16;
        hdr[1] = 0x03;
        hdr[2] = 0x01;
        hdr[3] = 0xFF;
        hdr[4] = i as u8;

        let cfg = Arc::new(cfg.clone());
        let stats = stats.clone();
        tasks.push(tokio::spawn(async move {
            let (server_side, mut client_side) = duplex(4096);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                format!("198.51.100.11:{}", 56000 + i).parse().unwrap(),
                cfg,
                stats,
                new_upstream_manager(Arc::new(Stats::new())),
                Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
                Arc::new(BufferPool::new()),
                Arc::new(SecureRandom::new()),
                None,
                Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
                None,
                Arc::new(UserIpTracker::new()),
                Arc::new(BeobachtenStore::new()),
                false,
            ));
            client_side.write_all(&hdr).await.unwrap();
            client_side.shutdown().await.unwrap();
            let result = tokio::time::timeout(Duration::from_secs(4), handler)
                .await
                .unwrap()
                .unwrap();
            assert!(result.is_ok());
            hdr.to_vec()
        }));
    }

    let mut expected = HashSet::new();
    for t in tasks {
        expected.insert(t.await.unwrap());
    }

    let seen = tokio::time::timeout(Duration::from_secs(6), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(seen, expected);
    assert_eq!(stats.get_handshake_timeouts(), 0);
}
|
||||||
|
|
||||||
|
/// Campaign 12: 16 concurrent "handshake OK, MTProto fail" sessions, each
/// with a distinct tail record (distinct length and fill byte). The backend
/// must observe exactly the set of tails the sessions produced — no
/// cross-session bleed or loss.
#[tokio::test]
async fn blackhat_campaign_12_parallel_tls_success_mtproto_fail_sessions_keep_isolation() {
    let sessions = 16usize;
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    // Tail for session i: (8 + i) bytes of value i — unique per session.
    let mut expected = HashSet::new();
    for i in 0..sessions {
        let rec = wrap_tls_application_data(&vec![i as u8; 8 + i]);
        expected.insert(rec);
    }

    // Backend side: for each connection, read one full TLS record
    // (header first, then the body length it declares).
    let accept_task = tokio::spawn(async move {
        let mut got_set = HashSet::new();
        for _ in 0..sessions {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut head = [0u8; 5];
            stream.read_exact(&mut head).await.unwrap();
            let len = u16::from_be_bytes([head[3], head[4]]) as usize;
            let mut rec = vec![0u8; 5 + len];
            rec[..5].copy_from_slice(&head);
            stream.read_exact(&mut rec[5..]).await.unwrap();
            got_set.insert(rec);
        }
        got_set
    });

    let mut tasks = Vec::new();
    for i in 0..sessions {
        // Fresh harness per session; all share the same user secret.
        // NOTE(review): mask_port is set again here even though the builder
        // already received backend_addr.port() — presumably redundant; confirm
        // against build_mask_harness.
        let mut harness = build_mask_harness("abababababababababababababababab", backend_addr.port());
        let mut cfg = (*harness.config).clone();
        cfg.censorship.mask_port = backend_addr.port();
        harness.config = Arc::new(cfg);
        tasks.push(tokio::spawn(async move {
            let secret = [0xABu8; 16];
            // Distinct timestamp and fill byte keep the hellos non-identical
            // so the shared-secret replay logic treats each as fresh.
            let hello = make_valid_tls_client_hello(&secret, 100 + i as u32, 600, 0x40 + (i as u8 % 10));
            let bad = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
            let tail = wrap_tls_application_data(&vec![i as u8; 8 + i]);
            let (server_side, mut client_side) = duplex(131072);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                format!("198.51.100.12:{}", 56100 + i).parse().unwrap(),
                harness.config,
                harness.stats,
                harness.upstream_manager,
                harness.replay_checker,
                harness.buffer_pool,
                harness.rng,
                None,
                harness.route_runtime,
                None,
                harness.ip_tracker,
                harness.beobachten,
                false,
            ));

            // Handshake, consume ServerHello, fail MTProto, send the tail.
            client_side.write_all(&hello).await.unwrap();
            let mut head = [0u8; 5];
            client_side.read_exact(&mut head).await.unwrap();
            read_and_discard_tls_record_body(&mut client_side, head).await;
            client_side.write_all(&bad).await.unwrap();
            client_side.write_all(&tail).await.unwrap();
            client_side.shutdown().await.unwrap();

            let result = tokio::time::timeout(Duration::from_secs(5), handler)
                .await
                .unwrap()
                .unwrap();
            assert!(result.is_ok());
            tail
        }));
    }

    let mut produced = HashSet::new();
    for t in tasks {
        produced.insert(t.await.unwrap());
    }

    let observed = tokio::time::timeout(Duration::from_secs(8), accept_task)
        .await
        .unwrap()
        .unwrap();

    // Sanity-check the generator, then the end-to-end relay.
    assert_eq!(produced, expected);
    assert_eq!(observed, expected);
}
|
||||||
|
|
||||||
|
/// Campaign 13: masking is enabled but the backend is unreachable (port 1)
/// and the handshake budget is a tight 1s. An invalid probe must still let
/// the handler finish cleanly without bumping the handshake-timeout counter.
#[tokio::test]
async fn blackhat_campaign_13_backend_down_does_not_escalate_to_handshake_timeout() {
    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    // Port 1: connection refused — the backend path cannot succeed.
    cfg.censorship.mask_port = 1;
    cfg.timeouts.client_handshake = 1;

    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(4096);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.13:55013".parse().unwrap(),
        Arc::new(cfg),
        stats.clone(),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Header-only probe with an out-of-bounds length.
    let bad = vec![0x16, 0x03, 0x01, 0xFF, 0x00];
    client_side.write_all(&bad).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    // Backend failure must not be misclassified as a handshake timeout.
    assert_eq!(stats.get_handshake_timeouts(), 0);
}
|
||||||
|
|
||||||
|
/// Campaign 14: with masking disabled entirely, an invalid probe must still
/// drive the handler to a clean, non-error exit with no timeout recorded.
#[tokio::test]
async fn blackhat_campaign_14_masking_disabled_path_finishes_cleanly() {
    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = false;
    cfg.timeouts.client_handshake = 1;

    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(4096);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.14:55014".parse().unwrap(),
        Arc::new(cfg),
        stats.clone(),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Header-only probe with an out-of-bounds length.
    let bad = vec![0x16, 0x03, 0x01, 0xFF, 0xF0];
    client_side.write_all(&bad).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    assert_eq!(stats.get_handshake_timeouts(), 0);
}
|
||||||
|
|
||||||
|
/// Campaign 15: deterministic light fuzzing of 20 probes with varied header
/// lengths and body truncation. Every probe is invalid TLS one way or
/// another, and the proxy must always mask exactly the bytes it received.
#[tokio::test]
async fn blackhat_campaign_15_light_fuzz_tls_lengths_and_fragmentation() {
    // Fixed seed for a deterministic LCG stream — failures are reproducible.
    let mut seed = 0x9E3779B97F4A7C15u64;

    for idx in 0..20u16 {
        seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1);
        let mut tls_len = (seed as usize) % 20000;
        // Every third probe forces the declared length over the TLS
        // plaintext cap, exercising the out-of-bounds-length path.
        if idx % 3 == 0 {
            tls_len = MAX_TLS_PLAINTEXT_SIZE + 1 + (tls_len % 1024);
        }

        // For in-range declared lengths, send only a short (< 30 byte)
        // truncated body; for out-of-range lengths, send no body at all.
        let body_to_send = if (MIN_TLS_CLIENT_HELLO_SIZE..=MAX_TLS_PLAINTEXT_SIZE).contains(&tls_len)
        {
            (seed as usize % 29).min(tls_len.saturating_sub(1))
        } else {
            0
        };

        let mut probe = vec![0u8; 5 + body_to_send];
        probe[0] = 0x16;
        probe[1] = 0x03;
        probe[2] = 0x01;
        probe[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
        // Pseudo-random body bytes from a second LCG.
        for b in &mut probe[5..] {
            seed = seed.wrapping_mul(2862933555777941757).wrapping_add(3037000493);
            *b = (seed >> 24) as u8;
        }

        // Whatever was sent is exactly what the backend must receive.
        let expected = probe.clone();
        run_invalid_tls_capture(Arc::new(ProxyConfig::default()), probe, expected).await;
    }
}
|
||||||
|
|
||||||
|
/// Campaign 16: a burst of 18 concurrent probe sessions alternating between
/// truncated-body probes and header-only out-of-bounds probes. All sessions
/// must complete (each inner capture does its own assertions) without panics.
#[tokio::test]
async fn blackhat_campaign_16_mixed_probe_burst_stress_finishes_without_panics() {
    let cases = 18usize;
    let mut tasks = Vec::new();

    for i in 0..cases {
        tasks.push(tokio::spawn(async move {
            if i % 2 == 0 {
                // Even sessions: declared 600-byte body, 0..12 filler bytes.
                let mut probe = vec![0u8; 5 + (i % 13)];
                probe[0] = 0x16;
                probe[1] = 0x03;
                probe[2] = 0x01;
                probe[3..5].copy_from_slice(&600u16.to_be_bytes());
                probe[5..].fill((0x90 + i as u8) ^ 0x5A);
                run_invalid_tls_capture(Arc::new(ProxyConfig::default()), probe.clone(), probe).await;
            } else {
                // Odd sessions: header-only probe with out-of-bounds length.
                let hdr = vec![0x16, 0x03, 0x01, 0xFF, i as u8];
                run_invalid_tls_capture(Arc::new(ProxyConfig::default()), hdr.clone(), hdr).await;
            }
        }));
    }

    // Propagate any panic from a session task.
    for task in tasks {
        task.await.unwrap();
    }
}
|
||||||
|
|
@ -0,0 +1,244 @@
|
||||||
|
use super::*;
|
||||||
|
use crate::config::{UpstreamConfig, UpstreamType};
|
||||||
|
use crate::crypto::sha256_hmac;
|
||||||
|
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_VERSION};
|
||||||
|
use crate::protocol::tls;
|
||||||
|
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::time::{Duration, Instant};
|
||||||
|
|
||||||
|
/// Bundles the shared components that `handle_client_stream` takes as
/// arguments, so pipeline tests can spin up the handler without repeating
/// construction boilerplate.
struct PipelineHarness {
    // Full proxy configuration under test.
    config: Arc<ProxyConfig>,
    // Counter store the handler reports into (e.g. handshake timeouts).
    stats: Arc<Stats>,
    // Upstream selection/connection manager.
    upstream_manager: Arc<UpstreamManager>,
    // Replay-protection state for handshake digests.
    replay_checker: Arc<ReplayChecker>,
    // Reusable byte-buffer pool for relay I/O.
    buffer_pool: Arc<BufferPool>,
    // Cryptographically secure RNG source.
    rng: Arc<SecureRandom>,
    // Runtime switch for the relay routing mode.
    route_runtime: Arc<RouteRuntimeController>,
    // Per-user client-IP tracking.
    ip_tracker: Arc<UserIpTracker>,
    // Observation/telemetry store ("beobachten" = observe).
    beobachten: Arc<BeobachtenStore>,
}
|
||||||
|
|
||||||
|
/// Builds a `PipelineHarness` around `config` with a single enabled direct
/// (no-proxy) upstream and fresh instances of every shared component.
fn build_harness(config: ProxyConfig) -> PipelineHarness {
    let config = Arc::new(config);
    let stats = Arc::new(Stats::new());
    // One direct upstream, weight 1, no interface/address pinning, no scopes.
    let upstream_manager = Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the four `1` values are positional tuning parameters
        // of UpstreamManager::new (presumably pool/retry limits) — confirm
        // their meaning against that constructor's signature.
        1,
        1,
        1,
        1,
        false,
        stats.clone(),
    ));

    PipelineHarness {
        config,
        stats,
        upstream_manager,
        // 256-entry replay window with a 60s TTL.
        replay_checker: Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    }
}
|
||||||
|
|
||||||
|
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
|
||||||
|
assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");
|
||||||
|
|
||||||
|
let total_len = 5 + tls_len;
|
||||||
|
let mut handshake = vec![fill; total_len];
|
||||||
|
|
||||||
|
handshake[0] = 0x16;
|
||||||
|
handshake[1] = 0x03;
|
||||||
|
handshake[2] = 0x01;
|
||||||
|
handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
|
||||||
|
|
||||||
|
let session_id_len: usize = 32;
|
||||||
|
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||||
|
|
||||||
|
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||||
|
let computed = sha256_hmac(secret, &handshake);
|
||||||
|
let mut digest = computed;
|
||||||
|
let ts = timestamp.to_le_bytes();
|
||||||
|
for i in 0..4 {
|
||||||
|
digest[28 + i] ^= ts[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||||
|
.copy_from_slice(&digest);
|
||||||
|
handshake
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
|
||||||
|
let mut record = Vec::with_capacity(5 + payload.len());
|
||||||
|
record.push(0x17);
|
||||||
|
record.extend_from_slice(&TLS_VERSION);
|
||||||
|
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
|
||||||
|
record.extend_from_slice(payload);
|
||||||
|
record
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn read_and_discard_tls_record_body<T>(stream: &mut T, header: [u8; 5])
|
||||||
|
where
|
||||||
|
T: tokio::io::AsyncRead + Unpin,
|
||||||
|
{
|
||||||
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
|
let mut body = vec![0u8; len];
|
||||||
|
stream.read_exact(&mut body).await.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
async fn masking_runs_outside_handshake_timeout_budget_with_high_reject_delay() {
    // A zero handshake timeout plus a 730 ms reject delay: if the bad-client
    // masking fallback ran inside the handshake-timeout budget, the handler
    // would be cancelled immediately instead of completing the fallback.
    // mask_port = 1 points at a port where no backend listens, so the mask
    // connect fails fast — presumably with a quick refusal; TODO confirm.
    let mut config = ProxyConfig::default();
    config.general.beobachten = false;
    config.censorship.mask = true;
    config.censorship.mask_unix_sock = None;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = 1;
    config.timeouts.client_handshake = 0;
    config.censorship.server_hello_delay_min_ms = 730;
    config.censorship.server_hello_delay_max_ms = 730;

    let harness = build_harness(config);
    let stats = harness.stats.clone();

    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "198.51.100.241:56541".parse().unwrap();

    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    // A well-formed TLS record header with garbage payload: fails the FakeTLS
    // digest check and triggers the masking fallback.
    let mut invalid_hello = vec![0u8; 5 + 600];
    invalid_hello[0] = 0x16;
    invalid_hello[1] = 0x03;
    invalid_hello[2] = 0x01;
    invalid_hello[3..5].copy_from_slice(&600u16.to_be_bytes());
    invalid_hello[5..].fill(0x44);

    let started = Instant::now();
    client_side.write_all(&invalid_hello).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();

    assert!(result.is_ok(), "bad-client fallback must not be canceled by handshake timeout");
    assert_eq!(
        stats.get_handshake_timeouts(),
        0,
        "masking fallback path must not increment handshake timeout counter"
    );
    // 700 ms lower bound leaves slack under the configured 730 ms delay.
    assert!(
        started.elapsed() >= Duration::from_millis(700),
        "configured reject delay should still be visible before masking"
    );
}
|
||||||
|
|
||||||
|
#[tokio::test]
async fn tls_mtproto_bad_client_does_not_reinject_clienthello_into_mask_backend() {
    // Scenario: the FakeTLS ClientHello is VALID (digest checks out), but the
    // inner MTProto handshake record is garbage. On fallback to the mask
    // backend, the already-consumed ClientHello must NOT be replayed into the
    // backend — the first bytes the backend sees are the trailing records.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let mut config = ProxyConfig::default();
    config.general.beobachten = false;
    config.censorship.mask = true;
    config.censorship.mask_unix_sock = None;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    config.censorship.mask_proxy_protocol = 0;
    config.access.ignore_time_skew = true;
    config
        .access
        .users
        .insert("user".to_string(), "d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0".to_string());

    let harness = build_harness(config);

    // Secret bytes must match the hex user secret above.
    let secret = [0xD0u8; 16];
    let client_hello = make_valid_tls_client_hello(&secret, 0, 600, 0x41);
    // HANDSHAKE_LEN zero bytes: right size for an MTProto handshake, wrong content.
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let trailing_record = wrap_tls_application_data(b"no-clienthello-reinject");
    let expected_trailing = trailing_record.clone();

    // Fake mask backend: assert the very first bytes are the trailing record.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();

        let mut got = vec![0u8; expected_trailing.len()];
        stream.read_exact(&mut got).await.unwrap();
        assert_eq!(
            got,
            expected_trailing,
            "mask backend must receive only post-handshake trailing TLS records"
        );
    });

    let (server_side, mut client_side) = duplex(131072);
    let peer: SocketAddr = "198.51.100.242:56542".parse().unwrap();

    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    client_side.write_all(&client_hello).await.unwrap();

    // The valid hello earns a ServerHello (handshake record, type 0x16).
    let mut tls_response_head = [0u8; 5];
    client_side.read_exact(&mut tls_response_head).await.unwrap();
    assert_eq!(tls_response_head[0], 0x16);
    read_and_discard_tls_record_body(&mut client_side, tls_response_head).await;

    client_side.write_all(&invalid_mtproto_record).await.unwrap();
    client_side.write_all(&trailing_record).await.unwrap();

    tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();

    drop(client_side);
    let result = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
}
|
||||||
|
|
@ -0,0 +1,192 @@
|
||||||
|
use super::*;
|
||||||
|
use crate::config::{UpstreamConfig, UpstreamType};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::time::{Duration, Instant};
|
||||||
|
|
||||||
|
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||||
|
Arc::new(UpstreamManager::new(
|
||||||
|
vec![UpstreamConfig {
|
||||||
|
upstream_type: UpstreamType::Direct {
|
||||||
|
interface: None,
|
||||||
|
bind_addresses: None,
|
||||||
|
},
|
||||||
|
weight: 1,
|
||||||
|
enabled: true,
|
||||||
|
scopes: String::new(),
|
||||||
|
selected_scope: String::new(),
|
||||||
|
}],
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
false,
|
||||||
|
stats,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Nearest-rank percentile of `values` at ratio `p_num / p_den`
/// (e.g. `50, 100` for the median, `95, 100` for p95).
///
/// Despite the `_ms` suffix, the unit is arbitrary — callers in this file feed
/// it millisecond latencies as well as byte counts. Returns `0` for an empty
/// sample set.
fn percentile_ms(mut values: Vec<u128>, p_num: usize, p_den: usize) -> u128 {
    if values.is_empty() {
        return 0;
    }
    values.sort_unstable();
    // Nearest-rank index over the inclusive [0, len-1] range.
    let rank = ((values.len() - 1) * p_num) / p_den;
    values[rank]
}
|
||||||
|
|
||||||
|
/// Sends a bogus ClientHello whose header claims 600 payload bytes but whose
/// body is only `body_sent` bytes long, then returns how many milliseconds
/// the handler took to finish the masking/reject path end-to-end.
async fn measure_reject_duration_ms(body_sent: usize) -> u128 {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    // No backend listens on port 1 — the mask connect is expected to fail.
    cfg.censorship.mask_port = 1;
    cfg.timeouts.client_handshake = 1;
    // Fixed 700 ms reject delay so timings are comparable across runs.
    cfg.censorship.server_hello_delay_min_ms = 700;
    cfg.censorship.server_hello_delay_max_ms = 700;

    let (server_side, mut client_side) = duplex(65536);
    let started = Instant::now();

    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.170:56170".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Header always advertises 600 bytes; the actual body is `body_sent`
    // bytes, i.e. deliberately truncated for every class below 600.
    let mut probe = vec![0u8; 5 + body_sent];
    probe[0] = 0x16;
    probe[1] = 0x03;
    probe[2] = 0x01;
    probe[3..5].copy_from_slice(&600u16.to_be_bytes());
    probe[5..].fill(0xA7);

    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());

    started.elapsed().as_millis()
}
|
||||||
|
|
||||||
|
/// Sends the same truncated probe as `measure_reject_duration_ms` but with a
/// live mask backend, and returns how many bytes the backend actually
/// received before the connection closed.
async fn capture_forwarded_len(body_sent: usize) -> usize {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.timeouts.client_handshake = 1;

    // Fake backend: drain whatever arrives for up to 2 s, then report the count.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = Vec::new();
        let _ = tokio::time::timeout(Duration::from_secs(2), stream.read_to_end(&mut got)).await;
        got.len()
    });

    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.171:56171".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Header claims 600 bytes; body carries only `body_sent` filler bytes.
    let mut probe = vec![0u8; 5 + body_sent];
    probe[0] = 0x16;
    probe[1] = 0x03;
    probe[2] = 0x01;
    probe[3..5].copy_from_slice(&600u16.to_be_bytes());
    probe[5..].fill(0xB4);

    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());

    tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap()
}
|
||||||
|
|
||||||
|
#[tokio::test]
async fn diagnostic_timing_profiles_are_within_realistic_guardrails() {
    // Probe body sizes exercised; each class is measured 8 times and its
    // reject-duration distribution must sit inside the guardrails below
    // (the configured reject delay is a fixed 700 ms).
    let classes = [17usize, 511usize, 1023usize, 4095usize];
    for class in classes {
        let mut samples = Vec::new();
        for _ in 0..8 {
            samples.push(measure_reject_duration_ms(class).await);
        }

        let p50 = percentile_ms(samples.clone(), 50, 100);
        let p95 = percentile_ms(samples.clone(), 95, 100);
        let max = *samples.iter().max().unwrap();
        // Printed so CI logs keep the raw timing profile per class.
        println!(
            "diagnostic_timing class={} p50={}ms p95={}ms max={}ms",
            class, p50, p95, max
        );

        assert!(p50 >= 650, "p50 too low for delayed reject class={}", class);
        assert!(p95 <= 1200, "p95 too high for delayed reject class={}", class);
        assert!(max <= 1500, "max too high for delayed reject class={}", class);
    }
}
|
||||||
|
|
||||||
|
#[tokio::test]
async fn diagnostic_forwarded_size_profiles_by_probe_class() {
    // For each truncated-probe class, the mask backend must receive exactly
    // the bytes the client sent (5-byte header + class body) — no padding,
    // no replays. Sizes are also summarized with the shared percentile helper
    // (which is unit-agnostic despite its `_ms` name).
    let classes = [0usize, 1usize, 7usize, 17usize, 63usize, 511usize, 1023usize, 2047usize];
    let mut observed = Vec::new();

    for class in classes {
        let len = capture_forwarded_len(class).await;
        println!("diagnostic_shape class={} forwarded_len={}", class, len);
        observed.push(len as u128);
        assert_eq!(len, 5 + class, "unexpected forwarded len for class={}", class);
    }

    let p50 = percentile_ms(observed.clone(), 50, 100);
    let p95 = percentile_ms(observed.clone(), 95, 100);
    let max = *observed.iter().max().unwrap();
    println!(
        "diagnostic_shape_summary p50={}bytes p95={}bytes max={}bytes",
        p50, p95, max
    );

    // Sanity: the summary statistics must be monotone.
    assert!(p95 >= p50);
    assert!(max >= p95);
}
|
||||||
|
|
@ -0,0 +1,701 @@
|
||||||
|
use super::*;
|
||||||
|
use crate::config::{UpstreamConfig, UpstreamType};
|
||||||
|
use crate::crypto::sha256_hmac;
|
||||||
|
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_RECORD_APPLICATION, TLS_VERSION};
|
||||||
|
use crate::protocol::tls;
|
||||||
|
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::time::{Duration, Instant};
|
||||||
|
|
||||||
|
/// Shared-service bundle for the masking security tests; same shape as the
/// pipeline tests' harness, built by this file's `build_harness`.
struct Harness {
    // Proxy configuration (masking enabled, one registered user).
    config: Arc<ProxyConfig>,
    // Counters asserted on by the tests (connects_bad, handshake timeouts, ...).
    stats: Arc<Stats>,
    // Single direct upstream.
    upstream_manager: Arc<UpstreamManager>,
    // Replay detection for repeated client hellos.
    replay_checker: Arc<ReplayChecker>,
    // Reusable relay buffers.
    buffer_pool: Arc<BufferPool>,
    // Randomness source for the handler.
    rng: Arc<SecureRandom>,
    // Relay routing mode controller (Direct).
    route_runtime: Arc<RouteRuntimeController>,
    // Per-user client IP tracking.
    ip_tracker: Arc<UserIpTracker>,
    // Observation store (disabled via config).
    beobachten: Arc<BeobachtenStore>,
}
|
||||||
|
|
||||||
|
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||||
|
Arc::new(UpstreamManager::new(
|
||||||
|
vec![UpstreamConfig {
|
||||||
|
upstream_type: UpstreamType::Direct {
|
||||||
|
interface: None,
|
||||||
|
bind_addresses: None,
|
||||||
|
},
|
||||||
|
weight: 1,
|
||||||
|
enabled: true,
|
||||||
|
scopes: String::new(),
|
||||||
|
selected_scope: String::new(),
|
||||||
|
}],
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
false,
|
||||||
|
stats,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_harness(secret_hex: &str, mask_port: u16) -> Harness {
|
||||||
|
let mut cfg = ProxyConfig::default();
|
||||||
|
cfg.general.beobachten = false;
|
||||||
|
cfg.censorship.mask = true;
|
||||||
|
cfg.censorship.mask_unix_sock = None;
|
||||||
|
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||||
|
cfg.censorship.mask_port = mask_port;
|
||||||
|
cfg.censorship.mask_proxy_protocol = 0;
|
||||||
|
cfg.access.ignore_time_skew = true;
|
||||||
|
cfg.access
|
||||||
|
.users
|
||||||
|
.insert("user".to_string(), secret_hex.to_string());
|
||||||
|
|
||||||
|
let config = Arc::new(cfg);
|
||||||
|
let stats = Arc::new(Stats::new());
|
||||||
|
|
||||||
|
Harness {
|
||||||
|
config,
|
||||||
|
stats: stats.clone(),
|
||||||
|
upstream_manager: new_upstream_manager(stats),
|
||||||
|
replay_checker: Arc::new(ReplayChecker::new(512, Duration::from_secs(60))),
|
||||||
|
buffer_pool: Arc::new(BufferPool::new()),
|
||||||
|
rng: Arc::new(SecureRandom::new()),
|
||||||
|
route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||||
|
ip_tracker: Arc::new(UserIpTracker::new()),
|
||||||
|
beobachten: Arc::new(BeobachtenStore::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
|
||||||
|
assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");
|
||||||
|
|
||||||
|
let total_len = 5 + tls_len;
|
||||||
|
let mut handshake = vec![fill; total_len];
|
||||||
|
|
||||||
|
handshake[0] = 0x16;
|
||||||
|
handshake[1] = 0x03;
|
||||||
|
handshake[2] = 0x01;
|
||||||
|
handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
|
||||||
|
|
||||||
|
let session_id_len: usize = 32;
|
||||||
|
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||||
|
|
||||||
|
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||||
|
let computed = sha256_hmac(secret, &handshake);
|
||||||
|
let mut digest = computed;
|
||||||
|
let ts = timestamp.to_le_bytes();
|
||||||
|
for i in 0..4 {
|
||||||
|
digest[28 + i] ^= ts[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||||
|
.copy_from_slice(&digest);
|
||||||
|
handshake
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
|
||||||
|
let mut record = Vec::with_capacity(5 + payload.len());
|
||||||
|
record.push(TLS_RECORD_APPLICATION);
|
||||||
|
record.extend_from_slice(&TLS_VERSION);
|
||||||
|
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
|
||||||
|
record.extend_from_slice(payload);
|
||||||
|
record
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn read_tls_record_body<T>(stream: &mut T, header: [u8; 5])
|
||||||
|
where
|
||||||
|
T: tokio::io::AsyncRead + Unpin,
|
||||||
|
{
|
||||||
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
|
let mut body = vec![0u8; len];
|
||||||
|
stream.read_exact(&mut body).await.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Drives the "valid FakeTLS hello, invalid MTProto handshake" fallback and
/// returns exactly the bytes the mask backend received.
///
/// `secret` must be the byte form of `secret_hex`. After the handler answers
/// the valid hello with a ServerHello, an invalid MTProto record plus
/// `trailing_records` are written; the backend must see only the trailing
/// records, concatenated in order.
async fn run_tls_success_mtproto_fail_capture(
    secret_hex: &str,
    secret: [u8; 16],
    timestamp: u32,
    trailing_records: Vec<Vec<u8>>,
) -> Vec<u8> {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let expected_len = trailing_records.iter().map(Vec::len).sum::<usize>();
    let expected_concat = trailing_records.concat();

    // Fake mask backend: read exactly the expected number of bytes.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_len];
        stream.read_exact(&mut got).await.unwrap();
        got
    });

    let harness = build_harness(secret_hex, backend_addr.port());
    let client_hello = make_valid_tls_client_hello(&secret, timestamp, 600, 0x42);
    // Correct length, zeroed content — fails the MTProto handshake check.
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);

    let (server_side, mut client_side) = duplex(262144);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.210:56010".parse().unwrap(),
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    client_side.write_all(&client_hello).await.unwrap();

    // The valid hello earns a handshake-type (0x16) ServerHello.
    let mut tls_response_head = [0u8; 5];
    client_side.read_exact(&mut tls_response_head).await.unwrap();
    assert_eq!(tls_response_head[0], 0x16);
    read_tls_record_body(&mut client_side, tls_response_head).await;

    client_side.write_all(&invalid_mtproto_record).await.unwrap();
    for record in trailing_records {
        client_side.write_all(&record).await.unwrap();
    }

    let got = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, expected_concat);

    drop(client_side);
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();

    got
}
|
||||||
|
|
||||||
|
#[tokio::test]
async fn masking_budget_survives_zero_handshake_timeout_with_delay() {
    // Like the pipeline variant, but with a LIVE mask backend: even with a
    // zero handshake timeout and a 720 ms reject delay, the invalid hello
    // must still be forwarded to the backend in full (605 bytes).
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.timeouts.client_handshake = 0;
    cfg.censorship.server_hello_delay_min_ms = 720;
    cfg.censorship.server_hello_delay_max_ms = 720;

    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());

    // Fake backend: expect the full 605-byte invalid hello.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; 605];
        stream.read_exact(&mut got).await.unwrap();
        got
    });

    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.211:56011".parse().unwrap(),
        config,
        stats.clone(),
        new_upstream_manager(stats.clone()),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Well-formed header, garbage body: fails the digest check.
    let mut invalid_hello = vec![0u8; 605];
    invalid_hello[0] = 0x16;
    invalid_hello[1] = 0x03;
    invalid_hello[2] = 0x01;
    invalid_hello[3..5].copy_from_slice(&600u16.to_be_bytes());
    invalid_hello[5..].fill(0xA1);

    let started = Instant::now();
    client_side.write_all(&invalid_hello).await.unwrap();

    let _ = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();

    client_side.shutdown().await.unwrap();
    let result = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();

    assert!(result.is_ok());
    assert_eq!(stats.get_handshake_timeouts(), 0);
    // 680 ms leaves slack under the configured 720 ms delay.
    assert!(started.elapsed() >= Duration::from_millis(680));
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn tls_mtproto_fail_forwards_only_trailing_record() {
|
||||||
|
let tail = wrap_tls_application_data(b"tail-only");
|
||||||
|
let got = run_tls_success_mtproto_fail_capture(
|
||||||
|
"c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1c1",
|
||||||
|
[0xC1; 16],
|
||||||
|
1,
|
||||||
|
vec![tail.clone()],
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
assert_eq!(got, tail);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
async fn replayed_tls_hello_gets_no_serverhello_and_is_masked() {
    // Session 1 presents a valid hello (seeding the replay checker), then an
    // invalid MTProto handshake — the backend sees the trailing "seed" record.
    // Session 2 REPLAYS the identical hello: it must get no ServerHello and
    // the raw hello itself must be forwarded to the mask backend instead.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let harness = build_harness("c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2c2", backend_addr.port());
    let secret = [0xC2u8; 16];
    let hello = make_valid_tls_client_hello(&secret, 2, 600, 0x41);
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let first_tail = wrap_tls_application_data(b"seed");

    let expected_hello = hello.clone();
    let expected_tail = first_tail.clone();

    // Fake backend accepts two connections: the first must carry the seed
    // tail, the second (from the replayed session) the full original hello.
    let accept_task = tokio::spawn(async move {
        let (mut s1, _) = listener.accept().await.unwrap();
        let mut got_tail = vec![0u8; expected_tail.len()];
        s1.read_exact(&mut got_tail).await.unwrap();
        assert_eq!(got_tail, expected_tail);
        drop(s1);

        let (mut s2, _) = listener.accept().await.unwrap();
        let mut got_hello = vec![0u8; expected_hello.len()];
        s2.read_exact(&mut got_hello).await.unwrap();
        assert_eq!(got_hello, expected_hello);
    });

    // Closure producing one full client session; `send_mtproto` selects the
    // first-session (valid flow) vs. second-session (replay) behavior.
    let run_session = |send_mtproto: bool| {
        let (server_side, mut client_side) = duplex(131072);
        let config = harness.config.clone();
        let stats = harness.stats.clone();
        let upstream = harness.upstream_manager.clone();
        let replay = harness.replay_checker.clone();
        let pool = harness.buffer_pool.clone();
        let rng = harness.rng.clone();
        let route = harness.route_runtime.clone();
        let ipt = harness.ip_tracker.clone();
        let beob = harness.beobachten.clone();
        let hello = hello.clone();
        let invalid_mtproto_record = invalid_mtproto_record.clone();
        let first_tail = first_tail.clone();

        async move {
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                "198.51.100.212:56012".parse().unwrap(),
                config,
                stats,
                upstream,
                replay,
                pool,
                rng,
                None,
                route,
                None,
                ipt,
                beob,
                false,
            ));

            client_side.write_all(&hello).await.unwrap();
            if send_mtproto {
                // Fresh hello: expect a ServerHello, then fail the MTProto
                // step and send the trailing record.
                let mut head = [0u8; 5];
                client_side.read_exact(&mut head).await.unwrap();
                assert_eq!(head[0], 0x16);
                read_tls_record_body(&mut client_side, head).await;
                client_side.write_all(&invalid_mtproto_record).await.unwrap();
                client_side.write_all(&first_tail).await.unwrap();
            } else {
                // Replayed hello: no ServerHello byte may arrive within 300 ms
                // (either the read times out or the stream is already closed).
                let mut one = [0u8; 1];
                let no_server_hello = tokio::time::timeout(
                    Duration::from_millis(300),
                    client_side.read_exact(&mut one),
                )
                .await;
                assert!(no_server_hello.is_err() || no_server_hello.unwrap().is_err());
            }

            client_side.shutdown().await.unwrap();
            let _ = tokio::time::timeout(Duration::from_secs(3), handler)
                .await
                .unwrap()
                .unwrap();
        }
    };

    run_session(true).await;
    run_session(false).await;

    tokio::time::timeout(Duration::from_secs(5), accept_task)
        .await
        .unwrap()
        .unwrap();
}
|
||||||
|
|
||||||
|
#[tokio::test]
async fn connects_bad_increments_once_per_invalid_mtproto() {
    // Accounting check: one valid-TLS / invalid-MTProto session must bump the
    // `connects_bad` counter by exactly one — not once per forwarded record.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let harness = build_harness("c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3c3", backend_addr.port());
    let stats = harness.stats.clone();
    let bad_before = stats.get_connects_bad();

    let tail = wrap_tls_application_data(b"accounting");
    let expected_tail = tail.clone();
    // Fake backend: the fallback must deliver exactly the trailing record.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_tail.len()];
        stream.read_exact(&mut got).await.unwrap();
        assert_eq!(got, expected_tail);
    });

    let hello = make_valid_tls_client_hello(&[0xC3; 16], 3, 600, 0x42);
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);

    let (server_side, mut client_side) = duplex(131072);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.213:56013".parse().unwrap(),
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    // Valid hello -> ServerHello, then the failing MTProto record + tail.
    client_side.write_all(&hello).await.unwrap();
    let mut head = [0u8; 5];
    client_side.read_exact(&mut head).await.unwrap();
    read_tls_record_body(&mut client_side, head).await;
    client_side.write_all(&invalid_mtproto_record).await.unwrap();
    client_side.write_all(&tail).await.unwrap();

    tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();

    client_side.shutdown().await.unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();

    assert_eq!(stats.get_connects_bad(), bad_before + 1);
}
|
||||||
|
|
||||||
|
#[tokio::test]
async fn truncated_clienthello_forwards_only_seen_prefix() {
    // The client sends a hello whose header claims 600 bytes but delivers
    // only 17, then closes. The masking fallback must forward exactly the
    // 22 bytes it actually saw — it must not fabricate or wait for the rest.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_unix_sock = None;

    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());

    // 5-byte record header + the 17 body bytes actually transmitted.
    let expected_prefix_len = 5 + 17;
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_prefix_len];
        stream.read_exact(&mut got).await.unwrap();
        got
    });

    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.214:56014".parse().unwrap(),
        config,
        stats,
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Truncated probe: header advertises 600 bytes, only 17 follow.
    let mut hello = vec![0u8; 5 + 17];
    hello[0] = 0x16;
    hello[1] = 0x03;
    hello[2] = 0x01;
    hello[3..5].copy_from_slice(&600u16.to_be_bytes());
    hello[5..].fill(0x55);

    client_side.write_all(&hello).await.unwrap();
    client_side.shutdown().await.unwrap();

    let got = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, hello);

    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn out_of_bounds_tls_len_forwards_header_only() {
|
||||||
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
|
let backend_addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
|
let mut cfg = ProxyConfig::default();
|
||||||
|
cfg.general.beobachten = false;
|
||||||
|
cfg.censorship.mask = true;
|
||||||
|
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||||
|
cfg.censorship.mask_port = backend_addr.port();
|
||||||
|
cfg.censorship.mask_unix_sock = None;
|
||||||
|
|
||||||
|
let config = Arc::new(cfg);
|
||||||
|
|
||||||
|
let accept_task = tokio::spawn(async move {
|
||||||
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
let mut got = [0u8; 5];
|
||||||
|
stream.read_exact(&mut got).await.unwrap();
|
||||||
|
got
|
||||||
|
});
|
||||||
|
|
||||||
|
let (server_side, mut client_side) = duplex(8192);
|
||||||
|
let handler = tokio::spawn(handle_client_stream(
|
||||||
|
server_side,
|
||||||
|
"198.51.100.215:56015".parse().unwrap(),
|
||||||
|
config,
|
||||||
|
Arc::new(Stats::new()),
|
||||||
|
new_upstream_manager(Arc::new(Stats::new())),
|
||||||
|
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||||
|
Arc::new(BufferPool::new()),
|
||||||
|
Arc::new(SecureRandom::new()),
|
||||||
|
None,
|
||||||
|
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||||
|
None,
|
||||||
|
Arc::new(UserIpTracker::new()),
|
||||||
|
Arc::new(BeobachtenStore::new()),
|
||||||
|
false,
|
||||||
|
));
|
||||||
|
|
||||||
|
let hdr = [0x16, 0x03, 0x01, 0x42, 0x69];
|
||||||
|
client_side.write_all(&hdr).await.unwrap();
|
||||||
|
client_side.shutdown().await.unwrap();
|
||||||
|
|
||||||
|
let got = tokio::time::timeout(Duration::from_secs(3), accept_task)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(got, hdr);
|
||||||
|
|
||||||
|
let _ = tokio::time::timeout(Duration::from_secs(3), handler)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn non_tls_with_modes_disabled_is_masked() {
|
||||||
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
|
let backend_addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
|
let mut cfg = ProxyConfig::default();
|
||||||
|
cfg.general.beobachten = false;
|
||||||
|
cfg.censorship.mask = true;
|
||||||
|
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||||
|
cfg.censorship.mask_port = backend_addr.port();
|
||||||
|
cfg.censorship.mask_unix_sock = None;
|
||||||
|
cfg.general.modes.classic = false;
|
||||||
|
cfg.general.modes.secure = false;
|
||||||
|
|
||||||
|
let config = Arc::new(cfg);
|
||||||
|
let accept_task = tokio::spawn(async move {
|
||||||
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
let mut got = [0u8; 5];
|
||||||
|
stream.read_exact(&mut got).await.unwrap();
|
||||||
|
got
|
||||||
|
});
|
||||||
|
|
||||||
|
let (server_side, mut client_side) = duplex(8192);
|
||||||
|
let handler = tokio::spawn(handle_client_stream(
|
||||||
|
server_side,
|
||||||
|
"198.51.100.216:56016".parse().unwrap(),
|
||||||
|
config,
|
||||||
|
Arc::new(Stats::new()),
|
||||||
|
new_upstream_manager(Arc::new(Stats::new())),
|
||||||
|
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||||
|
Arc::new(BufferPool::new()),
|
||||||
|
Arc::new(SecureRandom::new()),
|
||||||
|
None,
|
||||||
|
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||||
|
None,
|
||||||
|
Arc::new(UserIpTracker::new()),
|
||||||
|
Arc::new(BeobachtenStore::new()),
|
||||||
|
false,
|
||||||
|
));
|
||||||
|
|
||||||
|
let probe = *b"HELLO";
|
||||||
|
client_side.write_all(&probe).await.unwrap();
|
||||||
|
client_side.shutdown().await.unwrap();
|
||||||
|
|
||||||
|
let got = tokio::time::timeout(Duration::from_secs(3), accept_task)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(got, probe);
|
||||||
|
|
||||||
|
let _ = tokio::time::timeout(Duration::from_secs(3), handler)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
async fn concurrent_tls_mtproto_fail_sessions_are_isolated() {
    // Runs many fallback sessions in parallel against one mask backend and
    // checks that each backend connection receives exactly the trailing TLS
    // record of its own session (no cross-session byte mixing).
    let sessions = 12usize;
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    // Each session's trailing record is unique: payload byte = idx,
    // payload length = 32 + idx, so records are pairwise distinct.
    let mut expected = std::collections::HashSet::new();
    for idx in 0..sessions {
        let payload = vec![idx as u8; 32 + idx];
        expected.insert(wrap_tls_application_data(&payload));
    }

    // Backend: accept one connection per session, read one complete TLS
    // record, and tick it off the expected set; duplicates or foreign
    // records fail the `remove` assertion.
    let accept_task = tokio::spawn(async move {
        let mut remaining = expected;
        for _ in 0..sessions {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut header = [0u8; 5];
            stream.read_exact(&mut header).await.unwrap();
            assert_eq!(header[0], TLS_RECORD_APPLICATION);
            let len = u16::from_be_bytes([header[3], header[4]]) as usize;
            let mut record = vec![0u8; 5 + len];
            record[..5].copy_from_slice(&header);
            stream.read_exact(&mut record[5..]).await.unwrap();
            assert!(remaining.remove(&record));
        }
        assert!(remaining.is_empty());
    });

    let mut tasks = Vec::with_capacity(sessions);
    for idx in 0..sessions {
        // All sessions share one user secret; per-session variation comes
        // from the timestamp, the hello fill byte, and the trailing payload.
        let secret_hex = "c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4c4";
        let harness = build_harness(secret_hex, backend_addr.port());
        let hello = make_valid_tls_client_hello(&[0xC4; 16], 20 + idx as u32, 600, 0x40 + idx as u8);
        // All-zero handshake-length payload: passes TLS framing, fails MTProto.
        let invalid_mtproto = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
        let trailing = wrap_tls_application_data(&vec![idx as u8; 32 + idx]);
        let peer: SocketAddr = format!("198.51.100.217:{}", 56100 + idx as u16)
            .parse()
            .unwrap();

        tasks.push(tokio::spawn(async move {
            let (server_side, mut client_side) = duplex(131072);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                peer,
                harness.config,
                harness.stats,
                harness.upstream_manager,
                harness.replay_checker,
                harness.buffer_pool,
                harness.rng,
                None,
                harness.route_runtime,
                None,
                harness.ip_tracker,
                harness.beobachten,
                false,
            ));

            // TLS handshake succeeds, the MTProto payload is invalid (which
            // triggers fallback), then the trailing record must be relayed.
            client_side.write_all(&hello).await.unwrap();
            let mut head = [0u8; 5];
            client_side.read_exact(&mut head).await.unwrap();
            read_tls_record_body(&mut client_side, head).await;
            client_side.write_all(&invalid_mtproto).await.unwrap();
            client_side.write_all(&trailing).await.unwrap();
            client_side.shutdown().await.unwrap();

            let _ = tokio::time::timeout(Duration::from_secs(3), handler)
                .await
                .unwrap()
                .unwrap();
        }));
    }

    for task in tasks {
        task.await.unwrap();
    }

    // Wider budget: the backend must drain all 12 sessions.
    tokio::time::timeout(Duration::from_secs(6), accept_task)
        .await
        .unwrap()
        .unwrap();
}
|
||||||
|
|
||||||
|
macro_rules! tail_length_case {
    ($name:ident, $hex:expr, $secret:expr, $ts:expr, $len:expr) => {
        /// Verifies that a trailing TLS application record of the given
        /// payload length is forwarded to the mask backend unchanged after
        /// the MTProto stage fails.
        #[tokio::test]
        async fn $name() {
            // Deterministic payload pattern: byte i = i*17 + 5 (mod 256).
            let payload: Vec<u8> = (0..$len)
                .map(|i| (i as u8).wrapping_mul(17).wrapping_add(5))
                .collect();
            let record = wrap_tls_application_data(&payload);
            let got =
                run_tls_success_mtproto_fail_capture($hex, $secret, $ts, vec![record.clone()])
                    .await;
            assert_eq!(got, record);
        }
    };
}
|
||||||
|
|
||||||
|
// Trailing-record preservation across a spread of payload lengths (1..=1023),
// each with a distinct user secret and timestamp to keep sessions independent.
tail_length_case!(tail_len_1_preserved, "d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1d1", [0xD1; 16], 30, 1);
tail_length_case!(tail_len_2_preserved, "d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2d2", [0xD2; 16], 31, 2);
tail_length_case!(tail_len_3_preserved, "d3d3d3d3d3d3d3d3d3d3d3d3d3d3d3d3", [0xD3; 16], 32, 3);
tail_length_case!(tail_len_7_preserved, "d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4d4", [0xD4; 16], 33, 7);
tail_length_case!(tail_len_31_preserved, "d5d5d5d5d5d5d5d5d5d5d5d5d5d5d5d5", [0xD5; 16], 34, 31);
tail_length_case!(tail_len_127_preserved, "d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6d6", [0xD6; 16], 35, 127);
tail_length_case!(tail_len_511_preserved, "d7d7d7d7d7d7d7d7d7d7d7d7d7d7d7d7", [0xD7; 16], 36, 511);
tail_length_case!(tail_len_1023_preserved, "d8d8d8d8d8d8d8d8d8d8d8d8d8d8d8d8", [0xD8; 16], 37, 1023);
|
||||||
|
|
@ -0,0 +1,556 @@
|
||||||
|
use super::*;
|
||||||
|
use crate::config::{UpstreamConfig, UpstreamType};
|
||||||
|
use crate::crypto::sha256_hmac;
|
||||||
|
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_VERSION};
|
||||||
|
use crate::protocol::tls;
|
||||||
|
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::time::{Duration, Instant};
|
||||||
|
|
||||||
|
/// Bundle of every runtime dependency `handle_client_stream` takes, so
/// red-team tests can spawn one handler per simulated proxy instance with a
/// single `build_harness` call.
struct RedTeamHarness {
    // Proxy configuration with the mask backend enabled.
    config: Arc<ProxyConfig>,
    // Connection counters the tests inspect (e.g. bad-connect totals).
    stats: Arc<Stats>,
    // Upstream selection; harnesses use a single Direct upstream.
    upstream_manager: Arc<UpstreamManager>,
    // Replay protection for client hellos.
    replay_checker: Arc<ReplayChecker>,
    // Shared relay buffer pool.
    buffer_pool: Arc<BufferPool>,
    // CSPRNG used by the handshake path.
    rng: Arc<SecureRandom>,
    // Relay routing mode controller (Direct in these tests).
    route_runtime: Arc<RouteRuntimeController>,
    // Per-user client IP tracking.
    ip_tracker: Arc<UserIpTracker>,
    // Observation store (disabled via cfg.general.beobachten = false).
    beobachten: Arc<BeobachtenStore>,
}
|
||||||
|
|
||||||
|
/// Builds the shared fixture for red-team sessions: a proxy config whose mask
/// backend points at `127.0.0.1:mask_port`, one registered user keyed by
/// `secret_hex`, and fresh instances of every dependency the client handler
/// needs.
fn build_harness(secret_hex: &str, mask_port: u16) -> RedTeamHarness {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    // Route unrecognized traffic to the local TCP mask backend.
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_port;
    cfg.censorship.mask_proxy_protocol = 0;
    // Accept arbitrary client timestamps so tests can use small fixed values.
    cfg.access.ignore_time_skew = true;
    cfg.access
        .users
        .insert("user".to_string(), secret_hex.to_string());

    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    // Single direct upstream (no interface/address binding).
    // NOTE(review): the four `1` arguments and `false` are positional knobs
    // of UpstreamManager::new — verify their meaning against its signature.
    let upstream_manager = Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        1,
        1,
        1,
        1,
        false,
        stats.clone(),
    ));

    RedTeamHarness {
        config,
        stats,
        upstream_manager,
        replay_checker: Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    }
}
|
||||||
|
|
||||||
|
/// Builds a fake-TLS ClientHello record that passes the proxy's digest check.
///
/// Layout: 5-byte record header (type 0x16, legacy version 3.1, big-endian
/// `tls_len`) followed by `tls_len` bytes of `fill`, with the digest region
/// overwritten. The digest is `sha256_hmac(secret, record-with-zeroed-digest)`
/// and the little-endian `timestamp` is XORed into its last 4 bytes.
///
/// # Panics
/// Panics if `tls_len` does not fit in the 16-bit record length field.
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
    assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");

    let total_len = 5 + tls_len;
    let mut handshake = vec![fill; total_len];
    // Record header: Handshake (0x16), version 3.1.
    handshake[0] = 0x16;
    handshake[1] = 0x03;
    handshake[2] = 0x01;
    handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());

    // Session-id length byte sits immediately after the digest region.
    // NOTE(review): assumes tls::TLS_DIGEST_POS/TLS_DIGEST_LEN locate the
    // ClientHello field the server validates — confirm against protocol::tls.
    let session_id_len: usize = 32;
    handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
    // The digest region must be zero while the HMAC is computed over it.
    handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);

    let computed = sha256_hmac(secret, &handshake);
    let mut digest = computed;
    // Mix the client timestamp into digest bytes 28..32 (little-endian XOR).
    let ts = timestamp.to_le_bytes();
    for i in 0..4 {
        digest[28 + i] ^= ts[i];
    }

    handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
        .copy_from_slice(&digest);
    handshake
}
|
||||||
|
|
||||||
|
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
|
||||||
|
let mut record = Vec::with_capacity(5 + payload.len());
|
||||||
|
record.push(0x17);
|
||||||
|
record.extend_from_slice(&TLS_VERSION);
|
||||||
|
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
|
||||||
|
record.extend_from_slice(payload);
|
||||||
|
record
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Drives one complete fallback session against a live local mask backend:
/// a valid fake-TLS ClientHello, the proxy's ServerHello consumed, then an
/// invalid MTProto record followed by `tail` wrapped as a TLS application
/// record. Returns exactly the bytes the backend received for the trailing
/// record.
async fn run_tls_success_mtproto_fail_session(
    secret_hex: &str, // user secret as stored in config (hex string)
    secret: [u8; 16], // the same secret as raw bytes, for the hello digest
    timestamp: u32,   // client timestamp mixed into the hello digest
    tail: Vec<u8>,    // payload of the trailing application record
) -> Vec<u8> {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let harness = build_harness(secret_hex, backend_addr.port());
    let client_hello = make_valid_tls_client_hello(&secret, timestamp, 600, 0x42);
    // All-zero handshake-length payload: valid TLS framing, invalid MTProto.
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let trailing_record = wrap_tls_application_data(&tail);

    // Backend reads exactly the trailing record's length and returns it.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; trailing_record.len()];
        stream.read_exact(&mut got).await.unwrap();
        got
    });

    let (server_side, mut client_side) = duplex(262144);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.250:56900".parse().unwrap(),
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    client_side.write_all(&client_hello).await.unwrap();
    // Consume the proxy's ServerHello (5-byte header + declared body).
    let mut head = [0u8; 5];
    client_side.read_exact(&mut head).await.unwrap();
    assert_eq!(head[0], 0x16);
    let body_len = u16::from_be_bytes([head[3], head[4]]) as usize;
    let mut body = vec![0u8; body_len];
    client_side.read_exact(&mut body).await.unwrap();

    client_side.write_all(&invalid_mtproto_record).await.unwrap();
    // Re-wrap `tail`: `trailing_record` was moved into the accept task above.
    client_side.write_all(&wrap_tls_application_data(&tail)).await.unwrap();

    let forwarded = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();

    drop(client_side);
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();

    forwarded
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore = "red-team expected-fail: demonstrates that post-TLS fallback still forwards data to backend"]
|
||||||
|
async fn redteam_01_backend_receives_no_data_after_mtproto_fail() {
|
||||||
|
let forwarded = run_tls_success_mtproto_fail_session(
|
||||||
|
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
|
||||||
|
[0xAA; 16],
|
||||||
|
1,
|
||||||
|
b"probe-a".to_vec(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
assert!(forwarded.is_empty(), "backend unexpectedly received fallback bytes");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[ignore = "red-team expected-fail: strict no-fallback policy hypothesis"]
|
||||||
|
async fn redteam_02_backend_must_never_receive_tls_records_after_mtproto_fail() {
|
||||||
|
let forwarded = run_tls_success_mtproto_fail_session(
|
||||||
|
"abababababababababababababababab",
|
||||||
|
[0xAB; 16],
|
||||||
|
2,
|
||||||
|
b"probe-b".to_vec(),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
assert_ne!(forwarded[0], 0x17, "received TLS application record despite strict policy");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
#[ignore = "red-team expected-fail: impossible timing uniformity target"]
async fn redteam_03_masking_duration_must_be_less_than_1ms_when_backend_down() {
    // The mask backend points at port 1, which is expected to be unreachable;
    // the (known-impossible) hypothesis is that the masked reject path still
    // completes in under one millisecond.
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = 1; // deliberately dead backend
    cfg.access.ignore_time_skew = true;
    cfg.access
        .users
        .insert("user".to_string(), "acacacacacacacacacacacacacacacac".to_string());

    // Hand-rolled harness: build_harness is not used because the mask port
    // must stay at the unreachable value configured above.
    // NOTE(review): the four `1` arguments and `false` are positional knobs
    // of UpstreamManager::new — verify their meaning against its signature.
    let harness = RedTeamHarness {
        config: Arc::new(cfg),
        stats: Arc::new(Stats::new()),
        upstream_manager: Arc::new(UpstreamManager::new(
            vec![UpstreamConfig {
                upstream_type: UpstreamType::Direct {
                    interface: None,
                    bind_addresses: None,
                },
                weight: 1,
                enabled: true,
                scopes: String::new(),
                selected_scope: String::new(),
            }],
            1,
            1,
            1,
            1,
            false,
            Arc::new(Stats::new()),
        )),
        replay_checker: Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    };

    let hello = make_valid_tls_client_hello(&[0xAC; 16], 3, 600, 0x42);
    let (server_side, mut client_side) = duplex(131072);

    // Clock starts before the handler is spawned so setup cost is included.
    let started = Instant::now();
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.251:56901".parse().unwrap(),
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    client_side.write_all(&hello).await.unwrap();
    client_side.shutdown().await.unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();

    assert!(started.elapsed() < Duration::from_millis(1), "fallback path took longer than 1ms");
}
|
||||||
|
|
||||||
|
macro_rules! redteam_tail_must_not_forward_case {
    ($name:ident, $hex:expr, $secret:expr, $ts:expr, $len:expr) => {
        /// Strict no-forwarding hypothesis: the backend must see zero bytes
        /// of the post-failure tail. Expected to fail under the current
        /// fallback behavior, hence #[ignore].
        #[tokio::test]
        #[ignore = "red-team expected-fail: strict no-forwarding hypothesis"]
        async fn $name() {
            // Deterministic tail pattern: byte i = i*31 + 7 (mod 256).
            let tail: Vec<u8> = (0..$len)
                .map(|i| (i as u8).wrapping_mul(31).wrapping_add(7))
                .collect();
            let forwarded =
                run_tls_success_mtproto_fail_session($hex, $secret, $ts, tail).await;
            assert!(
                forwarded.is_empty(),
                "strict model expects zero forwarded bytes, got {}",
                forwarded.len()
            );
        }
    };
}
|
||||||
|
|
||||||
|
// Strict no-forwarding hypothesis across tail lengths 1..=4095; each case
// uses a distinct secret and timestamp so sessions cannot collide.
redteam_tail_must_not_forward_case!(redteam_04_tail_len_1_not_forwarded, "adadadadadadadadadadadadadadadad", [0xAD; 16], 4, 1);
redteam_tail_must_not_forward_case!(redteam_05_tail_len_2_not_forwarded, "aeaeaeaeaeaeaeaeaeaeaeaeaeaeaeae", [0xAE; 16], 5, 2);
redteam_tail_must_not_forward_case!(redteam_06_tail_len_3_not_forwarded, "afafafafafafafafafafafafafafafaf", [0xAF; 16], 6, 3);
redteam_tail_must_not_forward_case!(redteam_07_tail_len_7_not_forwarded, "b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0b0", [0xB0; 16], 7, 7);
redteam_tail_must_not_forward_case!(redteam_08_tail_len_15_not_forwarded, "b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1b1", [0xB1; 16], 8, 15);
redteam_tail_must_not_forward_case!(redteam_09_tail_len_63_not_forwarded, "b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2b2", [0xB2; 16], 9, 63);
redteam_tail_must_not_forward_case!(redteam_10_tail_len_127_not_forwarded, "b3b3b3b3b3b3b3b3b3b3b3b3b3b3b3b3", [0xB3; 16], 10, 127);
redteam_tail_must_not_forward_case!(redteam_11_tail_len_255_not_forwarded, "b4b4b4b4b4b4b4b4b4b4b4b4b4b4b4b4", [0xB4; 16], 11, 255);
redteam_tail_must_not_forward_case!(redteam_12_tail_len_511_not_forwarded, "b5b5b5b5b5b5b5b5b5b5b5b5b5b5b5b5", [0xB5; 16], 12, 511);
redteam_tail_must_not_forward_case!(redteam_13_tail_len_1023_not_forwarded, "b6b6b6b6b6b6b6b6b6b6b6b6b6b6b6b6", [0xB6; 16], 13, 1023);
redteam_tail_must_not_forward_case!(redteam_14_tail_len_2047_not_forwarded, "b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7", [0xB7; 16], 14, 2047);
redteam_tail_must_not_forward_case!(redteam_15_tail_len_4095_not_forwarded, "b8b8b8b8b8b8b8b8b8b8b8b8b8b8b8b8", [0xB8; 16], 15, 4095);
|
||||||
|
|
||||||
|
#[tokio::test]
#[ignore = "red-team expected-fail: impossible indistinguishability envelope"]
async fn redteam_16_timing_delta_between_paths_must_be_sub_1ms_under_concurrency() {
    // Measures 20 sequential valid-hello sessions and demands (unrealistically)
    // that min and max wall-clock duration differ by at most 1ms.
    let runs = 20usize;
    let mut durations = Vec::with_capacity(runs);

    for i in 0..runs {
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let backend_addr = listener.local_addr().unwrap();
        let secret = [0xB9u8; 16];
        let harness = build_harness("b9b9b9b9b9b9b9b9b9b9b9b9b9b9b9b9", backend_addr.port());
        // Unique timestamp per run avoids replay-checker interference.
        let hello = make_valid_tls_client_hello(&secret, 100 + i as u32, 600, 0x42);

        // Backend only needs to accept; no payload is exchanged in this test.
        let accept_task = tokio::spawn(async move {
            let (_stream, _) = listener.accept().await.unwrap();
        });

        let (server_side, mut client_side) = duplex(65536);
        let handler = tokio::spawn(handle_client_stream(
            server_side,
            "198.51.100.252:56902".parse().unwrap(),
            harness.config,
            harness.stats,
            harness.upstream_manager,
            harness.replay_checker,
            harness.buffer_pool,
            harness.rng,
            None,
            harness.route_runtime,
            None,
            harness.ip_tracker,
            harness.beobachten,
            false,
        ));

        // Clock covers write + shutdown + full handler teardown.
        let started = Instant::now();
        client_side.write_all(&hello).await.unwrap();
        client_side.shutdown().await.unwrap();

        let _ = tokio::time::timeout(Duration::from_secs(3), handler)
            .await
            .unwrap()
            .unwrap();
        let _ = tokio::time::timeout(Duration::from_secs(3), accept_task)
            .await
            .unwrap()
            .unwrap();

        durations.push(started.elapsed());
    }

    let min = durations.iter().copied().min().unwrap();
    let max = durations.iter().copied().max().unwrap();
    assert!(max - min <= Duration::from_millis(1), "timing spread too wide for strict anti-probing envelope");
}
|
||||||
|
|
||||||
|
/// Sends a malformed (non-authenticating) fake-TLS probe to a proxy whose
/// mask backend is unreachable and returns the wall-clock milliseconds until
/// the handler finishes. `delay_ms` pins the configured ServerHello delay
/// window to a single fixed value.
async fn measure_invalid_probe_duration_ms(
    delay_ms: u64,    // fixed min == max ServerHello delay
    tls_len: u16,     // length advertised in the record header
    body_sent: usize, // body bytes actually written (may differ from tls_len)
) -> u128 {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = 1; // deliberately unreachable backend
    cfg.timeouts.client_handshake = 1;
    // Collapse the randomized delay window to exactly `delay_ms`.
    cfg.censorship.server_hello_delay_min_ms = delay_ms;
    cfg.censorship.server_hello_delay_max_ms = delay_ms;

    let (server_side, mut client_side) = duplex(65536);
    // NOTE(review): the four `1` arguments and `false` below are positional
    // knobs of UpstreamManager::new — verify against its signature.
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.253:56903".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        Arc::new(UpstreamManager::new(
            vec![UpstreamConfig {
                upstream_type: UpstreamType::Direct {
                    interface: None,
                    bind_addresses: None,
                },
                weight: 1,
                enabled: true,
                scopes: String::new(),
                selected_scope: String::new(),
            }],
            1,
            1,
            1,
            1,
            false,
            Arc::new(Stats::new()),
        )),
        Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Handshake-looking record whose 0xD7 filler cannot authenticate.
    let mut probe = vec![0u8; 5 + body_sent];
    probe[0] = 0x16;
    probe[1] = 0x03;
    probe[2] = 0x01;
    probe[3..5].copy_from_slice(&tls_len.to_be_bytes());
    probe[5..].fill(0xD7);

    let started = Instant::now();
    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();

    let _ = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();

    started.elapsed().as_millis()
}
|
||||||
|
|
||||||
|
/// Sends a malformed fake-TLS probe to a proxy whose mask backend is a live
/// local listener and returns the total number of bytes that backend received
/// (read_to_end bounded by a 2s timeout on the backend side).
async fn capture_forwarded_probe_len(tls_len: u16, body_sent: usize) -> usize {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.timeouts.client_handshake = 1;

    // Backend drains everything it is given and reports only the byte count.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = Vec::new();
        let _ = tokio::time::timeout(Duration::from_secs(2), stream.read_to_end(&mut got)).await;
        got.len()
    });

    let (server_side, mut client_side) = duplex(65536);
    // NOTE(review): the four `1` arguments and `false` below are positional
    // knobs of UpstreamManager::new — verify against its signature.
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.254:56904".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        Arc::new(UpstreamManager::new(
            vec![UpstreamConfig {
                upstream_type: UpstreamType::Direct {
                    interface: None,
                    bind_addresses: None,
                },
                weight: 1,
                enabled: true,
                scopes: String::new(),
                selected_scope: String::new(),
            }],
            1,
            1,
            1,
            1,
            false,
            Arc::new(Stats::new()),
        )),
        Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Handshake-looking record whose 0xBC filler cannot authenticate.
    let mut probe = vec![0u8; 5 + body_sent];
    probe[0] = 0x16;
    probe[1] = 0x03;
    probe[2] = 0x01;
    probe[3..5].copy_from_slice(&tls_len.to_be_bytes());
    probe[5..].fill(0xBC);

    client_side.write_all(&probe).await.unwrap();
    client_side.shutdown().await.unwrap();

    let _ = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();

    tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap()
}
|
||||||
|
|
||||||
|
macro_rules! redteam_timing_envelope_case {
    ($name:ident, $delay_ms:expr, $tls_len:expr, $body_sent:expr, $max_ms:expr) => {
        /// Asserts that rejecting an invalid probe completes inside an
        /// (unrealistically tight) wall-clock budget. Expected to fail and
        /// kept #[ignore]d as documentation of the current timing profile.
        #[tokio::test]
        #[ignore = "red-team expected-fail: unrealistically tight reject timing envelope"]
        async fn $name() {
            let budget_ms: u128 = $max_ms;
            let elapsed_ms =
                measure_invalid_probe_duration_ms($delay_ms, $tls_len, $body_sent).await;
            assert!(
                elapsed_ms <= budget_ms,
                "timing envelope violated: elapsed={}ms, max={}ms",
                elapsed_ms,
                budget_ms
            );
        }
    };
}
|
||||||
|
|
||||||
|
macro_rules! redteam_constant_shape_case {
    ($name:ident, $tls_len:expr, $body_sent:expr, $expected_len:expr) => {
        /// Strict constant-shape hypothesis: the mask backend must observe
        /// the same byte count regardless of probe size. Expected to fail,
        /// hence #[ignore].
        #[tokio::test]
        #[ignore = "red-team expected-fail: strict constant-shape backend fingerprint hypothesis"]
        async fn $name() {
            let expected = $expected_len;
            let got = capture_forwarded_probe_len($tls_len, $body_sent).await;
            assert_eq!(
                got,
                expected,
                "fingerprint shape mismatch: got={} expected={} (strict constant-shape model)",
                got,
                expected
            );
        }
    };
}
|
||||||
|
|
||||||
|
redteam_timing_envelope_case!(redteam_17_timing_env_very_tight_00, 700, 600, 0, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_18_timing_env_very_tight_01, 700, 600, 1, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_19_timing_env_very_tight_02, 700, 600, 7, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_20_timing_env_very_tight_03, 700, 600, 17, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_21_timing_env_very_tight_04, 700, 600, 31, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_22_timing_env_very_tight_05, 700, 600, 63, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_23_timing_env_very_tight_06, 700, 600, 127, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_24_timing_env_very_tight_07, 700, 600, 255, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_25_timing_env_very_tight_08, 700, 600, 511, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_26_timing_env_very_tight_09, 700, 600, 1023, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_27_timing_env_very_tight_10, 700, 600, 2047, 3);
|
||||||
|
redteam_timing_envelope_case!(redteam_28_timing_env_very_tight_11, 700, 600, 4095, 3);
|
||||||
|
|
||||||
|
redteam_constant_shape_case!(redteam_29_constant_shape_00, 600, 0, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_30_constant_shape_01, 600, 1, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_31_constant_shape_02, 600, 7, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_32_constant_shape_03, 600, 17, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_33_constant_shape_04, 600, 31, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_34_constant_shape_05, 600, 63, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_35_constant_shape_06, 600, 127, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_36_constant_shape_07, 600, 255, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_37_constant_shape_08, 600, 511, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_38_constant_shape_09, 600, 1023, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_39_constant_shape_10, 600, 2047, 517);
|
||||||
|
redteam_constant_shape_case!(redteam_40_constant_shape_11, 600, 4095, 517);
|
||||||
|
|
@ -0,0 +1,122 @@
|
||||||
|
use super::*;
|
||||||
|
use crate::config::{UpstreamConfig, UpstreamType};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::time::Duration;
|
||||||
|
|
||||||
|
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||||
|
Arc::new(UpstreamManager::new(
|
||||||
|
vec![UpstreamConfig {
|
||||||
|
upstream_type: UpstreamType::Direct {
|
||||||
|
interface: None,
|
||||||
|
bind_addresses: None,
|
||||||
|
},
|
||||||
|
weight: 1,
|
||||||
|
enabled: true,
|
||||||
|
scopes: String::new(),
|
||||||
|
selected_scope: String::new(),
|
||||||
|
}],
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
false,
|
||||||
|
stats,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_probe_capture(
|
||||||
|
body_sent: usize,
|
||||||
|
tls_len: u16,
|
||||||
|
enable_shape_hardening: bool,
|
||||||
|
floor: usize,
|
||||||
|
cap: usize,
|
||||||
|
) -> Vec<u8> {
|
||||||
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
|
let backend_addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
|
let mut cfg = ProxyConfig::default();
|
||||||
|
cfg.general.beobachten = false;
|
||||||
|
cfg.censorship.mask = true;
|
||||||
|
cfg.censorship.mask_unix_sock = None;
|
||||||
|
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||||
|
cfg.censorship.mask_port = backend_addr.port();
|
||||||
|
cfg.censorship.mask_shape_hardening = enable_shape_hardening;
|
||||||
|
cfg.censorship.mask_shape_bucket_floor_bytes = floor;
|
||||||
|
cfg.censorship.mask_shape_bucket_cap_bytes = cap;
|
||||||
|
|
||||||
|
let accept_task = tokio::spawn(async move {
|
||||||
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
let mut got = Vec::new();
|
||||||
|
let _ = tokio::time::timeout(Duration::from_secs(2), stream.read_to_end(&mut got)).await;
|
||||||
|
got
|
||||||
|
});
|
||||||
|
|
||||||
|
let (server_side, mut client_side) = duplex(65536);
|
||||||
|
let handler = tokio::spawn(handle_client_stream(
|
||||||
|
server_side,
|
||||||
|
"198.51.100.188:56888".parse().unwrap(),
|
||||||
|
Arc::new(cfg),
|
||||||
|
Arc::new(Stats::new()),
|
||||||
|
new_upstream_manager(Arc::new(Stats::new())),
|
||||||
|
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||||
|
Arc::new(BufferPool::new()),
|
||||||
|
Arc::new(SecureRandom::new()),
|
||||||
|
None,
|
||||||
|
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||||
|
None,
|
||||||
|
Arc::new(UserIpTracker::new()),
|
||||||
|
Arc::new(BeobachtenStore::new()),
|
||||||
|
false,
|
||||||
|
));
|
||||||
|
|
||||||
|
let mut probe = vec![0u8; 5 + body_sent];
|
||||||
|
probe[0] = 0x16;
|
||||||
|
probe[1] = 0x03;
|
||||||
|
probe[2] = 0x01;
|
||||||
|
probe[3..5].copy_from_slice(&tls_len.to_be_bytes());
|
||||||
|
probe[5..].fill(0x66);
|
||||||
|
|
||||||
|
client_side.write_all(&probe).await.unwrap();
|
||||||
|
client_side.shutdown().await.unwrap();
|
||||||
|
|
||||||
|
let result = tokio::time::timeout(Duration::from_secs(4), handler)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
assert!(result.is_ok());
|
||||||
|
|
||||||
|
tokio::time::timeout(Duration::from_secs(4), accept_task)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn shape_hardening_disabled_keeps_original_probe_length() {
|
||||||
|
let got = run_probe_capture(17, 600, false, 512, 4096).await;
|
||||||
|
assert_eq!(got.len(), 22);
|
||||||
|
assert_eq!(&got[..5], &[0x16, 0x03, 0x01, 0x02, 0x58]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn shape_hardening_enabled_pads_small_probe_to_floor_bucket() {
|
||||||
|
let got = run_probe_capture(17, 600, true, 512, 4096).await;
|
||||||
|
assert_eq!(got.len(), 512);
|
||||||
|
assert_eq!(&got[..5], &[0x16, 0x03, 0x01, 0x02, 0x58]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn shape_hardening_enabled_pads_mid_probe_to_next_bucket() {
|
||||||
|
let got = run_probe_capture(511, 600, true, 512, 4096).await;
|
||||||
|
assert_eq!(got.len(), 1024);
|
||||||
|
assert_eq!(&got[..5], &[0x16, 0x03, 0x01, 0x02, 0x58]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn shape_hardening_respects_cap_and_avoids_padding_above_cap() {
|
||||||
|
let got = run_probe_capture(5000, 7000, true, 512, 4096).await;
|
||||||
|
assert_eq!(got.len(), 5005);
|
||||||
|
assert_eq!(&got[..5], &[0x16, 0x03, 0x01, 0x1b, 0x58]);
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,254 @@
|
||||||
|
use super::*;
|
||||||
|
use crate::config::{UpstreamConfig, UpstreamType};
|
||||||
|
use crate::crypto::sha256_hmac;
|
||||||
|
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_RECORD_APPLICATION, TLS_VERSION};
|
||||||
|
use crate::protocol::tls;
|
||||||
|
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::TcpListener;
|
||||||
|
use tokio::time::Duration;
|
||||||
|
|
||||||
|
struct StressHarness {
|
||||||
|
config: Arc<ProxyConfig>,
|
||||||
|
stats: Arc<Stats>,
|
||||||
|
upstream_manager: Arc<UpstreamManager>,
|
||||||
|
replay_checker: Arc<ReplayChecker>,
|
||||||
|
buffer_pool: Arc<BufferPool>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
route_runtime: Arc<RouteRuntimeController>,
|
||||||
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
|
beobachten: Arc<BeobachtenStore>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||||
|
Arc::new(UpstreamManager::new(
|
||||||
|
vec![UpstreamConfig {
|
||||||
|
upstream_type: UpstreamType::Direct {
|
||||||
|
interface: None,
|
||||||
|
bind_addresses: None,
|
||||||
|
},
|
||||||
|
weight: 1,
|
||||||
|
enabled: true,
|
||||||
|
scopes: String::new(),
|
||||||
|
selected_scope: String::new(),
|
||||||
|
}],
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
1,
|
||||||
|
false,
|
||||||
|
stats,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_harness(mask_port: u16, secret_hex: &str) -> StressHarness {
|
||||||
|
let mut cfg = ProxyConfig::default();
|
||||||
|
cfg.general.beobachten = false;
|
||||||
|
cfg.censorship.mask = true;
|
||||||
|
cfg.censorship.mask_unix_sock = None;
|
||||||
|
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||||
|
cfg.censorship.mask_port = mask_port;
|
||||||
|
cfg.censorship.mask_proxy_protocol = 0;
|
||||||
|
cfg.access.ignore_time_skew = true;
|
||||||
|
cfg.access
|
||||||
|
.users
|
||||||
|
.insert("user".to_string(), secret_hex.to_string());
|
||||||
|
|
||||||
|
let config = Arc::new(cfg);
|
||||||
|
let stats = Arc::new(Stats::new());
|
||||||
|
|
||||||
|
StressHarness {
|
||||||
|
config,
|
||||||
|
stats: stats.clone(),
|
||||||
|
upstream_manager: new_upstream_manager(stats),
|
||||||
|
replay_checker: Arc::new(ReplayChecker::new(1024, Duration::from_secs(60))),
|
||||||
|
buffer_pool: Arc::new(BufferPool::new()),
|
||||||
|
rng: Arc::new(SecureRandom::new()),
|
||||||
|
route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||||
|
ip_tracker: Arc::new(UserIpTracker::new()),
|
||||||
|
beobachten: Arc::new(BeobachtenStore::new()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
|
||||||
|
assert!(tls_len <= u16::MAX as usize, "TLS length must fit into record header");
|
||||||
|
|
||||||
|
let total_len = 5 + tls_len;
|
||||||
|
let mut handshake = vec![fill; total_len];
|
||||||
|
|
||||||
|
handshake[0] = 0x16;
|
||||||
|
handshake[1] = 0x03;
|
||||||
|
handshake[2] = 0x01;
|
||||||
|
handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
|
||||||
|
|
||||||
|
let session_id_len: usize = 32;
|
||||||
|
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||||
|
|
||||||
|
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||||
|
let computed = sha256_hmac(secret, &handshake);
|
||||||
|
let mut digest = computed;
|
||||||
|
let ts = timestamp.to_le_bytes();
|
||||||
|
for i in 0..4 {
|
||||||
|
digest[28 + i] ^= ts[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||||
|
.copy_from_slice(&digest);
|
||||||
|
handshake
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
|
||||||
|
let mut record = Vec::with_capacity(5 + payload.len());
|
||||||
|
record.push(TLS_RECORD_APPLICATION);
|
||||||
|
record.extend_from_slice(&TLS_VERSION);
|
||||||
|
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
|
||||||
|
record.extend_from_slice(payload);
|
||||||
|
record
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn read_tls_record_body<T>(stream: &mut T, header: [u8; 5])
|
||||||
|
where
|
||||||
|
T: tokio::io::AsyncRead + Unpin,
|
||||||
|
{
|
||||||
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
|
let mut body = vec![0u8; len];
|
||||||
|
stream.read_exact(&mut body).await.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn run_parallel_tail_fallback_case(
|
||||||
|
sessions: usize,
|
||||||
|
payload_len: usize,
|
||||||
|
write_chunk: usize,
|
||||||
|
ts_base: u32,
|
||||||
|
peer_port_base: u16,
|
||||||
|
) {
|
||||||
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
|
let backend_addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
|
let mut expected = std::collections::HashSet::new();
|
||||||
|
for idx in 0..sessions {
|
||||||
|
let payload = vec![((idx * 37) & 0xff) as u8; payload_len + idx % 3];
|
||||||
|
expected.insert(wrap_tls_application_data(&payload));
|
||||||
|
}
|
||||||
|
|
||||||
|
let accept_task = tokio::spawn(async move {
|
||||||
|
let mut remaining = expected;
|
||||||
|
for _ in 0..sessions {
|
||||||
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
let mut header = [0u8; 5];
|
||||||
|
stream.read_exact(&mut header).await.unwrap();
|
||||||
|
assert_eq!(header[0], TLS_RECORD_APPLICATION);
|
||||||
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
|
let mut record = vec![0u8; 5 + len];
|
||||||
|
record[..5].copy_from_slice(&header);
|
||||||
|
stream.read_exact(&mut record[5..]).await.unwrap();
|
||||||
|
assert!(remaining.remove(&record));
|
||||||
|
}
|
||||||
|
assert!(remaining.is_empty());
|
||||||
|
});
|
||||||
|
|
||||||
|
let mut tasks = Vec::with_capacity(sessions);
|
||||||
|
|
||||||
|
for idx in 0..sessions {
|
||||||
|
let harness = build_harness(backend_addr.port(), "e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0e0");
|
||||||
|
let hello = make_valid_tls_client_hello(
|
||||||
|
&[0xE0; 16],
|
||||||
|
ts_base + idx as u32,
|
||||||
|
600,
|
||||||
|
0x40 + (idx as u8),
|
||||||
|
);
|
||||||
|
|
||||||
|
let invalid_mtproto = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
||||||
|
let payload = vec![((idx * 37) & 0xff) as u8; payload_len + idx % 3];
|
||||||
|
let trailing = wrap_tls_application_data(&payload);
|
||||||
|
// Keep source IPs unique across stress cases so global pre-auth probe state
|
||||||
|
// cannot contaminate unrelated sessions and make this test nondeterministic.
|
||||||
|
let peer_ip_third = 100 + ((ts_base as u8) / 10);
|
||||||
|
let peer_ip_fourth = (idx as u8).saturating_add(1);
|
||||||
|
let peer: SocketAddr = format!(
|
||||||
|
"198.51.{}.{}:{}",
|
||||||
|
peer_ip_third,
|
||||||
|
peer_ip_fourth,
|
||||||
|
peer_port_base + idx as u16
|
||||||
|
)
|
||||||
|
.parse()
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
tasks.push(tokio::spawn(async move {
|
||||||
|
let (server_side, mut client_side) = duplex(262144);
|
||||||
|
let handler = tokio::spawn(handle_client_stream(
|
||||||
|
server_side,
|
||||||
|
peer,
|
||||||
|
harness.config,
|
||||||
|
harness.stats,
|
||||||
|
harness.upstream_manager,
|
||||||
|
harness.replay_checker,
|
||||||
|
harness.buffer_pool,
|
||||||
|
harness.rng,
|
||||||
|
None,
|
||||||
|
harness.route_runtime,
|
||||||
|
None,
|
||||||
|
harness.ip_tracker,
|
||||||
|
harness.beobachten,
|
||||||
|
false,
|
||||||
|
));
|
||||||
|
|
||||||
|
client_side.write_all(&hello).await.unwrap();
|
||||||
|
let mut server_hello_head = [0u8; 5];
|
||||||
|
client_side.read_exact(&mut server_hello_head).await.unwrap();
|
||||||
|
assert_eq!(server_hello_head[0], 0x16);
|
||||||
|
read_tls_record_body(&mut client_side, server_hello_head).await;
|
||||||
|
|
||||||
|
client_side.write_all(&invalid_mtproto).await.unwrap();
|
||||||
|
for chunk in trailing.chunks(write_chunk.max(1)) {
|
||||||
|
client_side.write_all(chunk).await.unwrap();
|
||||||
|
}
|
||||||
|
client_side.shutdown().await.unwrap();
|
||||||
|
|
||||||
|
let _ = tokio::time::timeout(Duration::from_secs(4), handler)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
}));
|
||||||
|
}
|
||||||
|
|
||||||
|
for task in tasks {
|
||||||
|
task.await.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
tokio::time::timeout(Duration::from_secs(8), accept_task)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
macro_rules! stress_case {
|
||||||
|
($name:ident, $sessions:expr, $payload_len:expr, $chunk:expr, $ts:expr, $port:expr) => {
|
||||||
|
#[tokio::test]
|
||||||
|
async fn $name() {
|
||||||
|
run_parallel_tail_fallback_case($sessions, $payload_len, $chunk, $ts, $port).await;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
stress_case!(stress_masking_parallel_s01, 4, 16, 1, 1000, 57000);
|
||||||
|
stress_case!(stress_masking_parallel_s02, 5, 24, 2, 1010, 57010);
|
||||||
|
stress_case!(stress_masking_parallel_s03, 6, 32, 3, 1020, 57020);
|
||||||
|
stress_case!(stress_masking_parallel_s04, 7, 40, 4, 1030, 57030);
|
||||||
|
stress_case!(stress_masking_parallel_s05, 8, 48, 5, 1040, 57040);
|
||||||
|
stress_case!(stress_masking_parallel_s06, 9, 56, 6, 1050, 57050);
|
||||||
|
stress_case!(stress_masking_parallel_s07, 10, 64, 7, 1060, 57060);
|
||||||
|
stress_case!(stress_masking_parallel_s08, 11, 72, 8, 1070, 57070);
|
||||||
|
stress_case!(stress_masking_parallel_s09, 12, 80, 9, 1080, 57080);
|
||||||
|
stress_case!(stress_masking_parallel_s10, 13, 88, 10, 1090, 57090);
|
||||||
|
stress_case!(stress_masking_parallel_s11, 6, 128, 11, 1100, 57100);
|
||||||
|
stress_case!(stress_masking_parallel_s12, 7, 160, 12, 1110, 57110);
|
||||||
|
stress_case!(stress_masking_parallel_s13, 8, 192, 13, 1120, 57120);
|
||||||
|
stress_case!(stress_masking_parallel_s14, 9, 224, 14, 1130, 57130);
|
||||||
|
stress_case!(stress_masking_parallel_s15, 10, 256, 15, 1140, 57140);
|
||||||
|
stress_case!(stress_masking_parallel_s16, 11, 288, 16, 1150, 57150);
|
||||||
|
stress_case!(stress_masking_parallel_s17, 12, 320, 17, 1160, 57160);
|
||||||
|
stress_case!(stress_masking_parallel_s18, 13, 352, 18, 1170, 57170);
|
||||||
|
stress_case!(stress_masking_parallel_s19, 14, 384, 19, 1180, 57180);
|
||||||
|
stress_case!(stress_masking_parallel_s20, 15, 416, 20, 1190, 57190);
|
||||||
|
stress_case!(stress_masking_parallel_s21, 16, 448, 21, 1200, 57200);
|
||||||
|
stress_case!(stress_masking_parallel_s22, 17, 480, 22, 1210, 57210);
|
||||||
|
|
@ -1325,14 +1325,9 @@ async fn valid_tls_with_invalid_mtproto_falls_back_to_mask_backend() {
|
||||||
let trailing_tls_payload = b"still-tls-after-fallback".to_vec();
|
let trailing_tls_payload = b"still-tls-after-fallback".to_vec();
|
||||||
let trailing_tls_record = wrap_tls_application_data(&trailing_tls_payload);
|
let trailing_tls_record = wrap_tls_application_data(&trailing_tls_payload);
|
||||||
|
|
||||||
let expected_fallback = client_hello.clone();
|
|
||||||
let expected_trailing_tls_record = trailing_tls_record.clone();
|
let expected_trailing_tls_record = trailing_tls_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got = vec![0u8; expected_fallback.len()];
|
|
||||||
stream.read_exact(&mut got).await.unwrap();
|
|
||||||
assert_eq!(got, expected_fallback);
|
|
||||||
|
|
||||||
let mut trailing = vec![0u8; expected_trailing_tls_record.len()];
|
let mut trailing = vec![0u8; expected_trailing_tls_record.len()];
|
||||||
stream.read_exact(&mut trailing).await.unwrap();
|
stream.read_exact(&mut trailing).await.unwrap();
|
||||||
assert_eq!(trailing, expected_trailing_tls_record);
|
assert_eq!(trailing, expected_trailing_tls_record);
|
||||||
|
|
@ -1432,14 +1427,9 @@ async fn client_handler_tls_bad_mtproto_is_forwarded_to_mask_backend() {
|
||||||
let trailing_tls_payload = b"second-tls-record".to_vec();
|
let trailing_tls_payload = b"second-tls-record".to_vec();
|
||||||
let trailing_tls_record = wrap_tls_application_data(&trailing_tls_payload);
|
let trailing_tls_record = wrap_tls_application_data(&trailing_tls_payload);
|
||||||
|
|
||||||
let expected_fallback = client_hello.clone();
|
|
||||||
let expected_trailing_tls_record = trailing_tls_record.clone();
|
let expected_trailing_tls_record = trailing_tls_record.clone();
|
||||||
let mask_accept_task = tokio::spawn(async move {
|
let mask_accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = mask_listener.accept().await.unwrap();
|
let (mut stream, _) = mask_listener.accept().await.unwrap();
|
||||||
let mut got = vec![0u8; expected_fallback.len()];
|
|
||||||
stream.read_exact(&mut got).await.unwrap();
|
|
||||||
assert_eq!(got, expected_fallback);
|
|
||||||
|
|
||||||
let mut trailing = vec![0u8; expected_trailing_tls_record.len()];
|
let mut trailing = vec![0u8; expected_trailing_tls_record.len()];
|
||||||
stream.read_exact(&mut trailing).await.unwrap();
|
stream.read_exact(&mut trailing).await.unwrap();
|
||||||
assert_eq!(trailing, expected_trailing_tls_record);
|
assert_eq!(trailing, expected_trailing_tls_record);
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,12 @@
|
||||||
use super::*;
|
use super::*;
|
||||||
use crate::config::{UpstreamConfig, UpstreamType};
|
use crate::config::{UpstreamConfig, UpstreamType};
|
||||||
use crate::crypto::sha256_hmac;
|
use crate::crypto::sha256_hmac;
|
||||||
use crate::protocol::constants::{HANDSHAKE_LEN, MAX_TLS_CIPHERTEXT_SIZE, TLS_VERSION};
|
use crate::protocol::constants::{
|
||||||
|
HANDSHAKE_LEN,
|
||||||
|
MAX_TLS_CIPHERTEXT_SIZE,
|
||||||
|
TLS_RECORD_APPLICATION,
|
||||||
|
TLS_VERSION,
|
||||||
|
};
|
||||||
use crate::protocol::tls;
|
use crate::protocol::tls;
|
||||||
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
|
use tokio::io::{duplex, AsyncReadExt, AsyncWriteExt};
|
||||||
use tokio::net::TcpListener;
|
use tokio::net::TcpListener;
|
||||||
|
|
@ -137,17 +142,11 @@ async fn tls_bad_mtproto_fallback_preserves_wire_and_backend_response() {
|
||||||
let trailing_payload = b"masked-trailing-record".to_vec();
|
let trailing_payload = b"masked-trailing-record".to_vec();
|
||||||
let trailing_record = wrap_tls_application_data(&trailing_payload);
|
let trailing_record = wrap_tls_application_data(&trailing_payload);
|
||||||
let backend_response = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nOK".to_vec();
|
let backend_response = b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nOK".to_vec();
|
||||||
|
|
||||||
let expected_client_hello = client_hello.clone();
|
|
||||||
let expected_trailing_record = trailing_record.clone();
|
let expected_trailing_record = trailing_record.clone();
|
||||||
let expected_response = backend_response.clone();
|
let expected_response = backend_response.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let mut got_hello = vec![0u8; expected_client_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_client_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
assert_eq!(got_trailing, expected_trailing_record);
|
assert_eq!(got_trailing, expected_trailing_record);
|
||||||
|
|
@ -208,16 +207,10 @@ async fn tls_bad_mtproto_fallback_keeps_connects_bad_accounting() {
|
||||||
let invalid_mtproto = vec![0u8; HANDSHAKE_LEN];
|
let invalid_mtproto = vec![0u8; HANDSHAKE_LEN];
|
||||||
let invalid_mtproto_record = wrap_tls_application_data(&invalid_mtproto);
|
let invalid_mtproto_record = wrap_tls_application_data(&invalid_mtproto);
|
||||||
let trailing_record = wrap_tls_application_data(b"x");
|
let trailing_record = wrap_tls_application_data(b"x");
|
||||||
|
|
||||||
let expected_client_hello = client_hello.clone();
|
|
||||||
let expected_trailing_record = trailing_record.clone();
|
let expected_trailing_record = trailing_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let mut got_hello = vec![0u8; expected_client_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_client_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
assert_eq!(got_trailing, expected_trailing_record);
|
assert_eq!(got_trailing, expected_trailing_record);
|
||||||
|
|
@ -281,16 +274,10 @@ async fn tls_bad_mtproto_fallback_forwards_zero_length_tls_record_verbatim() {
|
||||||
let invalid_mtproto = vec![0u8; HANDSHAKE_LEN];
|
let invalid_mtproto = vec![0u8; HANDSHAKE_LEN];
|
||||||
let invalid_mtproto_record = wrap_tls_application_data(&invalid_mtproto);
|
let invalid_mtproto_record = wrap_tls_application_data(&invalid_mtproto);
|
||||||
let trailing_record = wrap_tls_application_data(&[]);
|
let trailing_record = wrap_tls_application_data(&[]);
|
||||||
|
|
||||||
let expected_client_hello = client_hello.clone();
|
|
||||||
let expected_trailing_record = trailing_record.clone();
|
let expected_trailing_record = trailing_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let mut got_hello = vec![0u8; expected_client_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_client_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
assert_eq!(got_trailing, expected_trailing_record);
|
assert_eq!(got_trailing, expected_trailing_record);
|
||||||
|
|
@ -349,16 +336,10 @@ async fn tls_bad_mtproto_fallback_forwards_max_tls_record_verbatim() {
|
||||||
let invalid_mtproto_record = wrap_tls_application_data(&invalid_mtproto);
|
let invalid_mtproto_record = wrap_tls_application_data(&invalid_mtproto);
|
||||||
let trailing_payload = vec![0xAB; MAX_TLS_CIPHERTEXT_SIZE];
|
let trailing_payload = vec![0xAB; MAX_TLS_CIPHERTEXT_SIZE];
|
||||||
let trailing_record = wrap_tls_application_data(&trailing_payload);
|
let trailing_record = wrap_tls_application_data(&trailing_payload);
|
||||||
|
|
||||||
let expected_client_hello = client_hello.clone();
|
|
||||||
let expected_trailing_record = trailing_record.clone();
|
let expected_trailing_record = trailing_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let mut got_hello = vec![0u8; expected_client_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_client_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
assert_eq!(got_trailing, expected_trailing_record);
|
assert_eq!(got_trailing, expected_trailing_record);
|
||||||
|
|
@ -424,16 +405,10 @@ async fn tls_bad_mtproto_fallback_light_fuzz_tls_record_lengths_verbatim() {
|
||||||
*b = ((idx as u8).wrapping_mul(29)).wrapping_add(i as u8);
|
*b = ((idx as u8).wrapping_mul(29)).wrapping_add(i as u8);
|
||||||
}
|
}
|
||||||
let trailing_record = wrap_tls_application_data(&payload);
|
let trailing_record = wrap_tls_application_data(&payload);
|
||||||
|
|
||||||
let expected_client_hello = client_hello.clone();
|
|
||||||
let expected_trailing_record = trailing_record.clone();
|
let expected_trailing_record = trailing_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let mut got_hello = vec![0u8; expected_client_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_client_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
assert_eq!(got_trailing, expected_trailing_record);
|
assert_eq!(got_trailing, expected_trailing_record);
|
||||||
|
|
@ -490,30 +465,34 @@ async fn tls_bad_mtproto_fallback_concurrent_sessions_are_isolated() {
|
||||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
let backend_addr = listener.local_addr().unwrap();
|
let backend_addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
let mut expected_pairs = std::collections::HashMap::new();
|
let mut expected_records = std::collections::HashSet::new();
|
||||||
let secret = [0x86u8; 16];
|
let secret = [0x86u8; 16];
|
||||||
for idx in 0..sessions {
|
for idx in 0..sessions {
|
||||||
let hello = make_valid_tls_client_hello(&secret, idx as u32 + 100, 600, 0x60 + idx as u8);
|
let _hello = make_valid_tls_client_hello(&secret, idx as u32 + 100, 600, 0x60 + idx as u8);
|
||||||
let payload = vec![idx as u8; 64 + idx];
|
let payload = vec![idx as u8; 64 + idx];
|
||||||
let trailing = wrap_tls_application_data(&payload);
|
let trailing = wrap_tls_application_data(&payload);
|
||||||
expected_pairs.insert(hello, trailing);
|
expected_records.insert(trailing);
|
||||||
}
|
}
|
||||||
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let mut remaining = expected_pairs;
|
let mut remaining = expected_records;
|
||||||
for idx in 0..sessions {
|
for idx in 0..sessions {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let _ = idx;
|
let _ = idx;
|
||||||
let mut got_hello = vec![0u8; 605];
|
let mut header = [0u8; 5];
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
stream.read_exact(&mut header).await.unwrap();
|
||||||
let expected_trailing = remaining
|
assert_eq!(header[0], TLS_RECORD_APPLICATION);
|
||||||
.remove(&got_hello)
|
|
||||||
.expect("unexpected client hello in concurrent isolation test");
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
let mut record = vec![0u8; 5 + len];
|
||||||
assert_eq!(got_trailing, expected_trailing);
|
record[..5].copy_from_slice(&header);
|
||||||
|
stream.read_exact(&mut record[5..]).await.unwrap();
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
remaining.remove(&record),
|
||||||
|
"unexpected trailing TLS record in concurrent isolation test"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(remaining.is_empty(), "all expected client sessions must be matched exactly once");
|
assert!(remaining.is_empty(), "all expected client sessions must be matched exactly once");
|
||||||
|
|
@ -591,16 +570,10 @@ async fn tls_bad_mtproto_fallback_forwards_fragmented_client_writes_verbatim() {
|
||||||
let invalid_mtproto_record = wrap_tls_application_data(&invalid_mtproto);
|
let invalid_mtproto_record = wrap_tls_application_data(&invalid_mtproto);
|
||||||
let payload = b"fragmented-writes-to-test-stream-boundary-robustness".to_vec();
|
let payload = b"fragmented-writes-to-test-stream-boundary-robustness".to_vec();
|
||||||
let trailing_record = wrap_tls_application_data(&payload);
|
let trailing_record = wrap_tls_application_data(&payload);
|
||||||
|
|
||||||
let expected_client_hello = client_hello.clone();
|
|
||||||
let expected_trailing_record = trailing_record.clone();
|
let expected_trailing_record = trailing_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let mut got_hello = vec![0u8; expected_client_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_client_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
let mut got_trailing = vec![0u8; expected_trailing_record.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
assert_eq!(got_trailing, expected_trailing_record);
|
assert_eq!(got_trailing, expected_trailing_record);
|
||||||
|
|
@ -660,14 +633,9 @@ async fn tls_bad_mtproto_fallback_header_fragmentation_bytewise_is_verbatim() {
|
||||||
let client_hello = make_valid_tls_client_hello(&secret, 10, 600, 0x58);
|
let client_hello = make_valid_tls_client_hello(&secret, 10, 600, 0x58);
|
||||||
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
||||||
let trailing_record = wrap_tls_application_data(b"bytewise-header");
|
let trailing_record = wrap_tls_application_data(b"bytewise-header");
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_trailing = trailing_record.clone();
|
let expected_trailing = trailing_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
|
|
@ -732,14 +700,9 @@ async fn tls_bad_mtproto_fallback_record_splitting_chaos_is_verbatim() {
|
||||||
*b = (i as u8).wrapping_mul(17).wrapping_add(3);
|
*b = (i as u8).wrapping_mul(17).wrapping_add(3);
|
||||||
}
|
}
|
||||||
let trailing_record = wrap_tls_application_data(&payload);
|
let trailing_record = wrap_tls_application_data(&payload);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_trailing = trailing_record.clone();
|
let expected_trailing = trailing_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
|
|
@ -811,14 +774,9 @@ async fn tls_bad_mtproto_fallback_multiple_tls_records_are_forwarded_in_order()
|
||||||
let r2 = wrap_tls_application_data(b"beta-beta");
|
let r2 = wrap_tls_application_data(b"beta-beta");
|
||||||
let r3 = wrap_tls_application_data(b"gamma-gamma-gamma");
|
let r3 = wrap_tls_application_data(b"gamma-gamma-gamma");
|
||||||
let expected = [r1.clone(), r2.clone(), r3.clone()].concat();
|
let expected = [r1.clone(), r2.clone(), r3.clone()].concat();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_concat = expected.clone();
|
let expected_concat = expected.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got = vec![0u8; expected_concat.len()];
|
let mut got = vec![0u8; expected_concat.len()];
|
||||||
stream.read_exact(&mut got).await.unwrap();
|
stream.read_exact(&mut got).await.unwrap();
|
||||||
|
|
@ -877,16 +835,10 @@ async fn tls_bad_mtproto_fallback_client_half_close_propagates_eof_to_backend()
|
||||||
let client_hello = make_valid_tls_client_hello(&secret, 13, 600, 0x5B);
|
let client_hello = make_valid_tls_client_hello(&secret, 13, 600, 0x5B);
|
||||||
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
||||||
let trailing_record = wrap_tls_application_data(b"half-close-probe");
|
let trailing_record = wrap_tls_application_data(b"half-close-probe");
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_trailing = trailing_record.clone();
|
let expected_trailing = trailing_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
assert_eq!(got_trailing, expected_trailing);
|
assert_eq!(got_trailing, expected_trailing);
|
||||||
|
|
@ -947,15 +899,10 @@ async fn tls_bad_mtproto_fallback_backend_half_close_after_response_is_tolerated
|
||||||
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
||||||
let trailing_record = wrap_tls_application_data(b"backend-half-close");
|
let trailing_record = wrap_tls_application_data(b"backend-half-close");
|
||||||
let backend_response = b"HTTP/1.1 204 No Content\r\nContent-Length: 0\r\n\r\n".to_vec();
|
let backend_response = b"HTTP/1.1 204 No Content\r\nContent-Length: 0\r\n\r\n".to_vec();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_trailing = trailing_record.clone();
|
let expected_trailing = trailing_record.clone();
|
||||||
let response = backend_response.clone();
|
let response = backend_response.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
||||||
stream.read_exact(&mut got_trailing).await.unwrap();
|
stream.read_exact(&mut got_trailing).await.unwrap();
|
||||||
|
|
@ -1016,13 +963,8 @@ async fn tls_bad_mtproto_fallback_backend_reset_after_clienthello_is_handled() {
|
||||||
let client_hello = make_valid_tls_client_hello(&secret, 15, 600, 0x5D);
|
let client_hello = make_valid_tls_client_hello(&secret, 15, 600, 0x5D);
|
||||||
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
||||||
let trailing_record = wrap_tls_application_data(b"backend-reset");
|
let trailing_record = wrap_tls_application_data(b"backend-reset");
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
drop(stream);
|
drop(stream);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
|
@ -1082,16 +1024,10 @@ async fn tls_bad_mtproto_fallback_backend_slow_reader_preserves_byte_identity()
|
||||||
|
|
||||||
let payload = vec![0xEC; 8192];
|
let payload = vec![0xEC; 8192];
|
||||||
let trailing_record = wrap_tls_application_data(&payload);
|
let trailing_record = wrap_tls_application_data(&payload);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_trailing = trailing_record.clone();
|
let expected_trailing = trailing_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
let mut got_trailing = vec![0u8; expected_trailing.len()];
|
||||||
let mut offset = 0usize;
|
let mut offset = 0usize;
|
||||||
while offset < got_trailing.len() {
|
while offset < got_trailing.len() {
|
||||||
|
|
@ -1157,16 +1093,11 @@ async fn tls_bad_mtproto_fallback_replay_pressure_masks_replay_without_serverhel
|
||||||
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
|
||||||
let trailing_record = wrap_tls_application_data(b"first-session");
|
let trailing_record = wrap_tls_application_data(b"first-session");
|
||||||
|
|
||||||
let expected_first = replayed_hello.clone();
|
|
||||||
let expected_second = replayed_hello.clone();
|
let expected_second = replayed_hello.clone();
|
||||||
let expected_trailing = trailing_record.clone();
|
let expected_trailing = trailing_record.clone();
|
||||||
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut s1, _) = listener.accept().await.unwrap();
|
let (mut s1, _) = listener.accept().await.unwrap();
|
||||||
let mut got1 = vec![0u8; expected_first.len()];
|
|
||||||
s1.read_exact(&mut got1).await.unwrap();
|
|
||||||
assert_eq!(got1, expected_first);
|
|
||||||
|
|
||||||
let mut got1_tail = vec![0u8; expected_trailing.len()];
|
let mut got1_tail = vec![0u8; expected_trailing.len()];
|
||||||
s1.read_exact(&mut got1_tail).await.unwrap();
|
s1.read_exact(&mut got1_tail).await.unwrap();
|
||||||
assert_eq!(got1_tail, expected_trailing);
|
assert_eq!(got1_tail, expected_trailing);
|
||||||
|
|
@ -1269,14 +1200,9 @@ async fn tls_bad_mtproto_fallback_large_multi_record_chaos_under_backpressure()
|
||||||
let b = wrap_tls_application_data(&vec![0xB2; 3072]);
|
let b = wrap_tls_application_data(&vec![0xB2; 3072]);
|
||||||
let c = wrap_tls_application_data(&vec![0xC3; 1536]);
|
let c = wrap_tls_application_data(&vec![0xC3; 1536]);
|
||||||
let expected = [a.clone(), b.clone(), c.clone()].concat();
|
let expected = [a.clone(), b.clone(), c.clone()].concat();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_payload = expected.clone();
|
let expected_payload = expected.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got = vec![0u8; expected_payload.len()];
|
let mut got = vec![0u8; expected_payload.len()];
|
||||||
let mut pos = 0usize;
|
let mut pos = 0usize;
|
||||||
|
|
@ -1355,14 +1281,9 @@ async fn tls_bad_mtproto_fallback_interleaved_control_and_application_records_ve
|
||||||
let app = wrap_tls_application_data(b"opaque");
|
let app = wrap_tls_application_data(b"opaque");
|
||||||
let alert = wrap_tls_record(0x15, &[0x01, 0x00]);
|
let alert = wrap_tls_record(0x15, &[0x01, 0x00]);
|
||||||
let expected = [ccs.clone(), app.clone(), alert.clone()].concat();
|
let expected = [ccs.clone(), app.clone(), alert.clone()].concat();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_records = expected.clone();
|
let expected_records = expected.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got = vec![0u8; expected_records.len()];
|
let mut got = vec![0u8; expected_records.len()];
|
||||||
stream.read_exact(&mut got).await.unwrap();
|
stream.read_exact(&mut got).await.unwrap();
|
||||||
|
|
@ -1418,30 +1339,34 @@ async fn tls_bad_mtproto_fallback_many_short_sessions_with_chaos_no_cross_leak()
|
||||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
let backend_addr = listener.local_addr().unwrap();
|
let backend_addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
let mut expected_pairs = std::collections::HashMap::new();
|
let mut expected_records = std::collections::HashSet::new();
|
||||||
let secret = [0x92u8; 16];
|
let secret = [0x92u8; 16];
|
||||||
for idx in 0..sessions {
|
for idx in 0..sessions {
|
||||||
let hello = make_valid_tls_client_hello(&secret, idx as u32 + 200, 600, 0x70 + idx as u8);
|
let _hello = make_valid_tls_client_hello(&secret, idx as u32 + 200, 600, 0x70 + idx as u8);
|
||||||
let payload = vec![idx as u8; 33 + (idx % 17)];
|
let payload = vec![idx as u8; 33 + (idx % 17)];
|
||||||
let record = wrap_tls_application_data(&payload);
|
let record = wrap_tls_application_data(&payload);
|
||||||
expected_pairs.insert(hello, record);
|
expected_records.insert(record);
|
||||||
}
|
}
|
||||||
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let mut remaining = expected_pairs;
|
let mut remaining = expected_records;
|
||||||
for idx in 0..sessions {
|
for idx in 0..sessions {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
|
|
||||||
let _ = idx;
|
let _ = idx;
|
||||||
let mut got_hello = vec![0u8; 605];
|
let mut header = [0u8; 5];
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
stream.read_exact(&mut header).await.unwrap();
|
||||||
let expected_record = remaining
|
assert_eq!(header[0], TLS_RECORD_APPLICATION);
|
||||||
.remove(&got_hello)
|
|
||||||
.expect("unexpected client hello in short-session chaos test");
|
|
||||||
|
|
||||||
let mut got = vec![0u8; expected_record.len()];
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
stream.read_exact(&mut got).await.unwrap();
|
let mut record = vec![0u8; 5 + len];
|
||||||
assert_eq!(got, expected_record);
|
record[..5].copy_from_slice(&header);
|
||||||
|
stream.read_exact(&mut record[5..]).await.unwrap();
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
remaining.remove(&record),
|
||||||
|
"unexpected trailing TLS record in short-session chaos test"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
assert!(remaining.is_empty(), "all expected sessions must be consumed exactly once");
|
assert!(remaining.is_empty(), "all expected sessions must be consumed exactly once");
|
||||||
|
|
@ -1518,14 +1443,9 @@ async fn tls_bad_mtproto_fallback_coalesced_tail_small_is_forwarded_as_tls_recor
|
||||||
let coalesced_tail = b"coalesced-tail-small".to_vec();
|
let coalesced_tail = b"coalesced-tail-small".to_vec();
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&coalesced_tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&coalesced_tail);
|
||||||
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_tail = expected_tail_record.clone();
|
let expected_tail = expected_tail_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -1582,14 +1502,9 @@ async fn tls_bad_mtproto_fallback_coalesced_tail_large_is_forwarded_as_tls_recor
|
||||||
let coalesced_tail = vec![0xAB; 4096];
|
let coalesced_tail = vec![0xAB; 4096];
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&coalesced_tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&coalesced_tail);
|
||||||
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_tail = expected_tail_record.clone();
|
let expected_tail = expected_tail_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -1648,14 +1563,9 @@ async fn tls_bad_mtproto_fallback_coalesced_tail_keeps_order_before_following_re
|
||||||
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
||||||
let following_record = wrap_tls_application_data(b"following-record");
|
let following_record = wrap_tls_application_data(b"following-record");
|
||||||
let expected_concat = [expected_tail_record.clone(), following_record.clone()].concat();
|
let expected_concat = [expected_tail_record.clone(), following_record.clone()].concat();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_records = expected_concat.clone();
|
let expected_records = expected_concat.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_records = vec![0u8; expected_records.len()];
|
let mut got_records = vec![0u8; expected_records.len()];
|
||||||
stream.read_exact(&mut got_records).await.unwrap();
|
stream.read_exact(&mut got_records).await.unwrap();
|
||||||
|
|
@ -1713,14 +1623,9 @@ async fn tls_bad_mtproto_fallback_coalesced_tail_fragmented_client_write_is_forw
|
||||||
let coalesced_tail = vec![0xCD; 1536];
|
let coalesced_tail = vec![0xCD; 1536];
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&coalesced_tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&coalesced_tail);
|
||||||
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_tail = expected_tail_record.clone();
|
let expected_tail = expected_tail_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -1789,14 +1694,9 @@ async fn tls_bad_mtproto_fallback_coalesced_tail_max_payload_is_forwarded() {
|
||||||
let coalesced_tail = vec![0xEF; MAX_TLS_CIPHERTEXT_SIZE - HANDSHAKE_LEN];
|
let coalesced_tail = vec![0xEF; MAX_TLS_CIPHERTEXT_SIZE - HANDSHAKE_LEN];
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&coalesced_tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&coalesced_tail);
|
||||||
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
let expected_tail_record = wrap_tls_application_data(&coalesced_tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_tail = expected_tail_record.clone();
|
let expected_tail = expected_tail_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -1854,14 +1754,9 @@ async fn blackhat_coalesced_tail_identical_following_record_must_not_duplicate_o
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let tail_record = wrap_tls_application_data(&tail);
|
let tail_record = wrap_tls_application_data(&tail);
|
||||||
let expected = [tail_record.clone(), tail_record.clone()].concat();
|
let expected = [tail_record.clone(), tail_record.clone()].concat();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_payload = expected.clone();
|
let expected_payload = expected.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got = vec![0u8; expected_payload.len()];
|
let mut got = vec![0u8; expected_payload.len()];
|
||||||
stream.read_exact(&mut got).await.unwrap();
|
stream.read_exact(&mut got).await.unwrap();
|
||||||
|
|
@ -1924,14 +1819,9 @@ async fn blackhat_coalesced_tail_tls_header_looking_bytes_must_stay_payload() {
|
||||||
tail.extend_from_slice(b"not-a-real-record-boundary");
|
tail.extend_from_slice(b"not-a-real-record-boundary");
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let expected_tail_record = wrap_tls_application_data(&tail);
|
let expected_tail_record = wrap_tls_application_data(&tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_tail = expected_tail_record.clone();
|
let expected_tail = expected_tail_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -1988,14 +1878,9 @@ async fn blackhat_coalesced_tail_client_half_close_must_not_truncate_prepended_r
|
||||||
let tail = vec![0xAA; 3072];
|
let tail = vec![0xAA; 3072];
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let expected_tail_record = wrap_tls_application_data(&tail);
|
let expected_tail_record = wrap_tls_application_data(&tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_tail = expected_tail_record.clone();
|
let expected_tail = expected_tail_record.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -2052,27 +1937,31 @@ async fn blackhat_coalesced_tail_multi_session_no_cross_bleed_under_churn() {
|
||||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
let backend_addr = listener.local_addr().unwrap();
|
let backend_addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
let mut expected = std::collections::HashMap::new();
|
let mut expected = std::collections::HashSet::new();
|
||||||
let secret = [0xB4u8; 16];
|
let secret = [0xB4u8; 16];
|
||||||
for idx in 0..sessions {
|
for idx in 0..sessions {
|
||||||
let hello = make_valid_tls_client_hello(&secret, 450 + idx as u32, 600, 0x40 + idx as u8);
|
let _hello = make_valid_tls_client_hello(&secret, 450 + idx as u32, 600, 0x40 + idx as u8);
|
||||||
let tail = vec![idx as u8; 17 + idx];
|
let tail = vec![idx as u8; 17 + idx];
|
||||||
expected.insert(hello, wrap_tls_application_data(&tail));
|
expected.insert(wrap_tls_application_data(&tail));
|
||||||
}
|
}
|
||||||
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let mut remaining = expected;
|
let mut remaining = expected;
|
||||||
for _ in 0..sessions {
|
for _ in 0..sessions {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; 605];
|
let mut header = [0u8; 5];
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
stream.read_exact(&mut header).await.unwrap();
|
||||||
let expected_tail = remaining
|
assert_eq!(header[0], TLS_RECORD_APPLICATION);
|
||||||
.remove(&got_hello)
|
|
||||||
.expect("unexpected hello or duplicated session routing");
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
let mut record = vec![0u8; 5 + len];
|
||||||
assert_eq!(got_tail, expected_tail);
|
record[..5].copy_from_slice(&header);
|
||||||
|
stream.read_exact(&mut record[5..]).await.unwrap();
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
remaining.remove(&record),
|
||||||
|
"unexpected record or duplicated session routing"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
assert!(remaining.is_empty(), "all sessions must map one-to-one");
|
assert!(remaining.is_empty(), "all sessions must map one-to-one");
|
||||||
});
|
});
|
||||||
|
|
@ -2144,13 +2033,8 @@ async fn blackhat_coalesced_tail_single_byte_tail_is_preserved() {
|
||||||
let tail = vec![0x7F];
|
let tail = vec![0x7F];
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let expected_tail = wrap_tls_application_data(&tail);
|
let expected_tail = wrap_tls_application_data(&tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -2206,13 +2090,8 @@ async fn blackhat_coalesced_tail_exact_tls_header_size_payload_is_preserved() {
|
||||||
let tail = vec![0xAA, 0xBB, 0xCC, 0xDD, 0xEE];
|
let tail = vec![0xAA, 0xBB, 0xCC, 0xDD, 0xEE];
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let expected_tail = wrap_tls_application_data(&tail);
|
let expected_tail = wrap_tls_application_data(&tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -2268,13 +2147,8 @@ async fn blackhat_coalesced_tail_all_zero_payload_is_preserved() {
|
||||||
let tail = vec![0u8; 2048];
|
let tail = vec![0u8; 2048];
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let expected_tail = wrap_tls_application_data(&tail);
|
let expected_tail = wrap_tls_application_data(&tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -2334,14 +2208,9 @@ async fn blackhat_coalesced_tail_following_control_records_are_not_mutated() {
|
||||||
let alert = wrap_tls_record(0x15, &[0x01, 0x00]);
|
let alert = wrap_tls_record(0x15, &[0x01, 0x00]);
|
||||||
let app = wrap_tls_application_data(b"control-final-app");
|
let app = wrap_tls_application_data(b"control-final-app");
|
||||||
let expected = [tail_record, ccs.clone(), alert.clone(), app.clone()].concat();
|
let expected = [tail_record, ccs.clone(), alert.clone(), app.clone()].concat();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_payload = expected.clone();
|
let expected_payload = expected.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_payload = vec![0u8; expected_payload.len()];
|
let mut got_payload = vec![0u8; expected_payload.len()];
|
||||||
stream.read_exact(&mut got_payload).await.unwrap();
|
stream.read_exact(&mut got_payload).await.unwrap();
|
||||||
|
|
@ -2404,14 +2273,9 @@ async fn blackhat_coalesced_tail_then_following_records_fragmented_chaos_stays_o
|
||||||
let r1 = wrap_tls_application_data(b"r1");
|
let r1 = wrap_tls_application_data(b"r1");
|
||||||
let r2 = wrap_tls_application_data(&vec![0xDD; 257]);
|
let r2 = wrap_tls_application_data(&vec![0xDD; 257]);
|
||||||
let expected = [tail_record, r1.clone(), r2.clone()].concat();
|
let expected = [tail_record, r1.clone(), r2.clone()].concat();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_payload = expected.clone();
|
let expected_payload = expected.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_payload = vec![0u8; expected_payload.len()];
|
let mut got_payload = vec![0u8; expected_payload.len()];
|
||||||
stream.read_exact(&mut got_payload).await.unwrap();
|
stream.read_exact(&mut got_payload).await.unwrap();
|
||||||
|
|
@ -2480,14 +2344,9 @@ async fn blackhat_coalesced_tail_backend_response_integrity_after_fallback() {
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let expected_tail = wrap_tls_application_data(&tail);
|
let expected_tail = wrap_tls_application_data(&tail);
|
||||||
let backend_response = b"HTTP/1.1 204 No Content\r\nContent-Length: 0\r\n\r\n".to_vec();
|
let backend_response = b"HTTP/1.1 204 No Content\r\nContent-Length: 0\r\n\r\n".to_vec();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let expected_resp = backend_response.clone();
|
let expected_resp = backend_response.clone();
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -2574,13 +2433,8 @@ async fn blackhat_coalesced_tail_connects_bad_increments_exactly_once() {
|
||||||
let harness = build_harness("c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7", backend_addr.port());
|
let harness = build_harness("c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7c7", backend_addr.port());
|
||||||
let stats = harness.stats.clone();
|
let stats = harness.stats.clone();
|
||||||
let bad_before = stats.get_connects_bad();
|
let bad_before = stats.get_connects_bad();
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -2637,27 +2491,31 @@ async fn blackhat_coalesced_tail_parallel_32_sessions_no_cross_bleed() {
|
||||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||||
let backend_addr = listener.local_addr().unwrap();
|
let backend_addr = listener.local_addr().unwrap();
|
||||||
|
|
||||||
let mut expected = std::collections::HashMap::new();
|
let mut expected = std::collections::HashSet::new();
|
||||||
let secret = [0xC8u8; 16];
|
let secret = [0xC8u8; 16];
|
||||||
for idx in 0..sessions {
|
for idx in 0..sessions {
|
||||||
let hello = make_valid_tls_client_hello(&secret, 550 + idx as u32, 600, 0x20 + idx as u8);
|
let _hello = make_valid_tls_client_hello(&secret, 550 + idx as u32, 600, 0x20 + idx as u8);
|
||||||
let tail = vec![idx as u8; 48 + (idx % 11)];
|
let tail = vec![idx as u8; 48 + (idx % 11)];
|
||||||
expected.insert(hello, wrap_tls_application_data(&tail));
|
expected.insert(wrap_tls_application_data(&tail));
|
||||||
}
|
}
|
||||||
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let mut remaining = expected;
|
let mut remaining = expected;
|
||||||
for _ in 0..sessions {
|
for _ in 0..sessions {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; 605];
|
let mut header = [0u8; 5];
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
stream.read_exact(&mut header).await.unwrap();
|
||||||
let expected_tail = remaining
|
assert_eq!(header[0], TLS_RECORD_APPLICATION);
|
||||||
.remove(&got_hello)
|
|
||||||
.expect("session mixup detected in parallel-32 blackhat test");
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
let mut record = vec![0u8; 5 + len];
|
||||||
assert_eq!(got_tail, expected_tail);
|
record[..5].copy_from_slice(&header);
|
||||||
|
stream.read_exact(&mut record[5..]).await.unwrap();
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
remaining.remove(&record),
|
||||||
|
"session mixup detected in parallel-32 blackhat test"
|
||||||
|
);
|
||||||
}
|
}
|
||||||
assert!(remaining.is_empty(), "all expected sessions must be consumed");
|
assert!(remaining.is_empty(), "all expected sessions must be consumed");
|
||||||
});
|
});
|
||||||
|
|
@ -2734,13 +2592,8 @@ async fn blackhat_coalesced_tail_repeated_tls_like_prefixes_are_preserved() {
|
||||||
tail.extend_from_slice(b"suffix-data");
|
tail.extend_from_slice(b"suffix-data");
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let expected_tail = wrap_tls_application_data(&tail);
|
let expected_tail = wrap_tls_application_data(&tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -2795,13 +2648,8 @@ async fn blackhat_coalesced_tail_drop_after_write_still_delivers_prepended_recor
|
||||||
let tail = vec![0xBE; 1024];
|
let tail = vec![0xBE; 1024];
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let expected_tail = wrap_tls_application_data(&tail);
|
let expected_tail = wrap_tls_application_data(&tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
@ -2856,13 +2704,8 @@ async fn blackhat_coalesced_tail_zero_following_record_after_coalesced_is_not_in
|
||||||
let tail = b"terminal-tail".to_vec();
|
let tail = b"terminal-tail".to_vec();
|
||||||
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
let coalesced_record = wrap_invalid_mtproto_with_coalesced_tail(&tail);
|
||||||
let expected_tail = wrap_tls_application_data(&tail);
|
let expected_tail = wrap_tls_application_data(&tail);
|
||||||
|
|
||||||
let expected_hello = client_hello.clone();
|
|
||||||
let accept_task = tokio::spawn(async move {
|
let accept_task = tokio::spawn(async move {
|
||||||
let (mut stream, _) = listener.accept().await.unwrap();
|
let (mut stream, _) = listener.accept().await.unwrap();
|
||||||
let mut got_hello = vec![0u8; expected_hello.len()];
|
|
||||||
stream.read_exact(&mut got_hello).await.unwrap();
|
|
||||||
assert_eq!(got_hello, expected_hello);
|
|
||||||
|
|
||||||
let mut got_tail = vec![0u8; expected_tail.len()];
|
let mut got_tail = vec![0u8; expected_tail.len()];
|
||||||
stream.read_exact(&mut got_tail).await.unwrap();
|
stream.read_exact(&mut got_tail).await.unwrap();
|
||||||
|
|
|
||||||
|
|
@ -30,12 +30,13 @@ const MASK_RELAY_IDLE_TIMEOUT: Duration = Duration::from_secs(5);
|
||||||
const MASK_RELAY_IDLE_TIMEOUT: Duration = Duration::from_millis(100);
|
const MASK_RELAY_IDLE_TIMEOUT: Duration = Duration::from_millis(100);
|
||||||
const MASK_BUFFER_SIZE: usize = 8192;
|
const MASK_BUFFER_SIZE: usize = 8192;
|
||||||
|
|
||||||
async fn copy_with_idle_timeout<R, W>(reader: &mut R, writer: &mut W)
|
async fn copy_with_idle_timeout<R, W>(reader: &mut R, writer: &mut W) -> usize
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin,
|
R: AsyncRead + Unpin,
|
||||||
W: AsyncWrite + Unpin,
|
W: AsyncWrite + Unpin,
|
||||||
{
|
{
|
||||||
let mut buf = [0u8; MASK_BUFFER_SIZE];
|
let mut buf = [0u8; MASK_BUFFER_SIZE];
|
||||||
|
let mut total = 0usize;
|
||||||
loop {
|
loop {
|
||||||
let read_res = timeout(MASK_RELAY_IDLE_TIMEOUT, reader.read(&mut buf)).await;
|
let read_res = timeout(MASK_RELAY_IDLE_TIMEOUT, reader.read(&mut buf)).await;
|
||||||
let n = match read_res {
|
let n = match read_res {
|
||||||
|
|
@ -45,6 +46,7 @@ where
|
||||||
if n == 0 {
|
if n == 0 {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
total = total.saturating_add(n);
|
||||||
|
|
||||||
let write_res = timeout(MASK_RELAY_IDLE_TIMEOUT, writer.write_all(&buf[..n])).await;
|
let write_res = timeout(MASK_RELAY_IDLE_TIMEOUT, writer.write_all(&buf[..n])).await;
|
||||||
match write_res {
|
match write_res {
|
||||||
|
|
@ -52,6 +54,54 @@ where
|
||||||
Ok(Err(_)) | Err(_) => break,
|
Ok(Err(_)) | Err(_) => break,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
total
|
||||||
|
}
|
||||||
|
|
||||||
|
fn next_mask_shape_bucket(total: usize, floor: usize, cap: usize) -> usize {
|
||||||
|
if total == 0 || floor == 0 || cap < floor {
|
||||||
|
return total;
|
||||||
|
}
|
||||||
|
|
||||||
|
if total >= cap {
|
||||||
|
return total;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut bucket = floor;
|
||||||
|
while bucket < total {
|
||||||
|
match bucket.checked_mul(2) {
|
||||||
|
Some(next) => bucket = next,
|
||||||
|
None => return total,
|
||||||
|
}
|
||||||
|
if bucket > cap {
|
||||||
|
return total;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bucket
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn maybe_write_shape_padding<W>(
|
||||||
|
mask_write: &mut W,
|
||||||
|
total_sent: usize,
|
||||||
|
enabled: bool,
|
||||||
|
floor: usize,
|
||||||
|
cap: usize,
|
||||||
|
)
|
||||||
|
where
|
||||||
|
W: AsyncWrite + Unpin,
|
||||||
|
{
|
||||||
|
if !enabled {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let bucket = next_mask_shape_bucket(total_sent, floor, cap);
|
||||||
|
if bucket <= total_sent {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let pad_len = bucket - total_sent;
|
||||||
|
let pad = vec![0u8; pad_len];
|
||||||
|
let _ = timeout(MASK_TIMEOUT, mask_write.write_all(&pad)).await;
|
||||||
|
let _ = timeout(MASK_TIMEOUT, mask_write.flush()).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn write_proxy_header_with_timeout<W>(mask_write: &mut W, header: &[u8]) -> bool
|
async fn write_proxy_header_with_timeout<W>(mask_write: &mut W, header: &[u8]) -> bool
|
||||||
|
|
@ -201,7 +251,22 @@ where
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
if timeout(
|
||||||
|
MASK_RELAY_TIMEOUT,
|
||||||
|
relay_to_mask(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
mask_read,
|
||||||
|
mask_write,
|
||||||
|
initial_data,
|
||||||
|
config.censorship.mask_shape_hardening,
|
||||||
|
config.censorship.mask_shape_bucket_floor_bytes,
|
||||||
|
config.censorship.mask_shape_bucket_cap_bytes,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
debug!("Mask relay timed out (unix socket)");
|
debug!("Mask relay timed out (unix socket)");
|
||||||
}
|
}
|
||||||
wait_mask_outcome_budget(outcome_started).await;
|
wait_mask_outcome_budget(outcome_started).await;
|
||||||
|
|
@ -252,7 +317,22 @@ where
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
if timeout(
|
||||||
|
MASK_RELAY_TIMEOUT,
|
||||||
|
relay_to_mask(
|
||||||
|
reader,
|
||||||
|
writer,
|
||||||
|
mask_read,
|
||||||
|
mask_write,
|
||||||
|
initial_data,
|
||||||
|
config.censorship.mask_shape_hardening,
|
||||||
|
config.censorship.mask_shape_bucket_floor_bytes,
|
||||||
|
config.censorship.mask_shape_bucket_cap_bytes,
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.is_err()
|
||||||
|
{
|
||||||
debug!("Mask relay timed out");
|
debug!("Mask relay timed out");
|
||||||
}
|
}
|
||||||
wait_mask_outcome_budget(outcome_started).await;
|
wait_mask_outcome_budget(outcome_started).await;
|
||||||
|
|
@ -278,6 +358,9 @@ async fn relay_to_mask<R, W, MR, MW>(
|
||||||
mut mask_read: MR,
|
mut mask_read: MR,
|
||||||
mut mask_write: MW,
|
mut mask_write: MW,
|
||||||
initial_data: &[u8],
|
initial_data: &[u8],
|
||||||
|
shape_hardening_enabled: bool,
|
||||||
|
shape_bucket_floor_bytes: usize,
|
||||||
|
shape_bucket_cap_bytes: usize,
|
||||||
)
|
)
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin + Send + 'static,
|
R: AsyncRead + Unpin + Send + 'static,
|
||||||
|
|
@ -295,11 +378,20 @@ where
|
||||||
|
|
||||||
let _ = tokio::join!(
|
let _ = tokio::join!(
|
||||||
async {
|
async {
|
||||||
copy_with_idle_timeout(&mut reader, &mut mask_write).await;
|
let copied = copy_with_idle_timeout(&mut reader, &mut mask_write).await;
|
||||||
|
let total_sent = initial_data.len().saturating_add(copied);
|
||||||
|
maybe_write_shape_padding(
|
||||||
|
&mut mask_write,
|
||||||
|
total_sent,
|
||||||
|
shape_hardening_enabled,
|
||||||
|
shape_bucket_floor_bytes,
|
||||||
|
shape_bucket_cap_bytes,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
let _ = mask_write.shutdown().await;
|
let _ = mask_write.shutdown().await;
|
||||||
},
|
},
|
||||||
async {
|
async {
|
||||||
copy_with_idle_timeout(&mut mask_read, &mut writer).await;
|
let _ = copy_with_idle_timeout(&mut mask_read, &mut writer).await;
|
||||||
let _ = writer.shutdown().await;
|
let _ = writer.shutdown().await;
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
|
||||||
|
|
@ -1318,6 +1318,9 @@ async fn relay_to_mask_keeps_backend_to_client_flow_when_client_to_backend_stall
|
||||||
backend_feed_reader,
|
backend_feed_reader,
|
||||||
PendingWriter,
|
PendingWriter,
|
||||||
b"",
|
b"",
|
||||||
|
false,
|
||||||
|
0,
|
||||||
|
0,
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
});
|
});
|
||||||
|
|
@ -1421,7 +1424,7 @@ async fn relay_to_mask_timeout_cancels_and_drops_all_io_endpoints() {
|
||||||
|
|
||||||
let timed = timeout(
|
let timed = timeout(
|
||||||
Duration::from_millis(40),
|
Duration::from_millis(40),
|
||||||
relay_to_mask(reader, writer, mask_read, mask_write, b""),
|
relay_to_mask(reader, writer, mask_read, mask_write, b"", false, 0, 0),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue