Merge branch 'flow' into fix/api-docker-connection

This commit is contained in:
Frew777 2026-03-21 22:42:10 +03:00 committed by GitHub
commit 490a52eb87
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
60 changed files with 2767 additions and 444 deletions

View File

@ -21,16 +21,13 @@ env:
jobs: jobs:
prepare: prepare:
name: Prepare metadata
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs: outputs:
version: ${{ steps.meta.outputs.version }} version: ${{ steps.meta.outputs.version }}
prerelease: ${{ steps.meta.outputs.prerelease }} prerelease: ${{ steps.meta.outputs.prerelease }}
release_enabled: ${{ steps.meta.outputs.release_enabled }} release_enabled: ${{ steps.meta.outputs.release_enabled }}
steps: steps:
- name: Derive version - id: meta
id: meta
shell: bash
run: | run: |
set -euo pipefail set -euo pipefail
@ -53,62 +50,38 @@ jobs:
echo "release_enabled=$RELEASE_ENABLED" >> "$GITHUB_OUTPUT" echo "release_enabled=$RELEASE_ENABLED" >> "$GITHUB_OUTPUT"
checks: checks:
name: Checks
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: debian:trixie image: debian:trixie
steps: steps:
- name: Install system dependencies - run: |
run: |
set -euo pipefail
apt-get update apt-get update
apt-get install -y --no-install-recommends \ apt-get install -y build-essential clang llvm pkg-config curl git
ca-certificates \
curl \
git \
build-essential \
pkg-config \
clang \
llvm \
python3 \
python3-pip
update-ca-certificates
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable - uses: dtolnay/rust-toolchain@stable
with: with:
components: rustfmt, clippy components: rustfmt, clippy
- name: Cache cargo - uses: actions/cache@v4
uses: actions/cache@v4
with: with:
path: | path: |
/github/home/.cargo/registry /github/home/.cargo/registry
/github/home/.cargo/git /github/home/.cargo/git
target target
key: checks-${{ runner.os }}-${{ hashFiles('**/Cargo.lock') }} key: checks-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
checks-${{ runner.os }}-
- name: Cargo fetch - run: cargo fetch --locked
run: cargo fetch --locked - run: cargo fmt --all -- --check
- run: cargo clippy
- name: Format - run: cargo test
run: cargo fmt --all -- --check
- name: Clippy
run: cargo clippy
- name: Tests
run: cargo test
build-binaries: build-binaries:
name: Build ${{ matrix.asset_name }}
needs: [prepare, checks] needs: [prepare, checks]
runs-on: ubuntu-latest runs-on: ubuntu-latest
container: container:
image: debian:trixie image: debian:trixie
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
@ -127,154 +100,80 @@ jobs:
asset_name: telemt-aarch64-linux-musl asset_name: telemt-aarch64-linux-musl
steps: steps:
- name: Install system dependencies - run: |
run: |
set -euo pipefail
apt-get update apt-get update
apt-get install -y --no-install-recommends \ apt-get install -y clang llvm pkg-config curl git python3 python3-pip file tar xz-utils
ca-certificates \
curl \
git \
build-essential \
pkg-config \
clang \
llvm \
file \
tar \
xz-utils \
python3 \
python3-pip
update-ca-certificates
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable - uses: dtolnay/rust-toolchain@stable
with: with:
targets: ${{ matrix.rust_target }} targets: ${{ matrix.rust_target }}
- name: Cache cargo - uses: actions/cache@v4
uses: actions/cache@v4
with: with:
path: | path: |
/github/home/.cargo/registry /github/home/.cargo/registry
/github/home/.cargo/git /github/home/.cargo/git
target target
key: build-${{ matrix.zig_target }}-${{ hashFiles('**/Cargo.lock') }} key: build-${{ matrix.zig_target }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
build-${{ matrix.zig_target }}-
- name: Install cargo-zigbuild + Zig - run: |
run: |
set -euo pipefail
python3 -m pip install --user --break-system-packages cargo-zigbuild python3 -m pip install --user --break-system-packages cargo-zigbuild
echo "/github/home/.local/bin" >> "$GITHUB_PATH" echo "/github/home/.local/bin" >> "$GITHUB_PATH"
- name: Cargo fetch - run: cargo fetch --locked
run: cargo fetch --locked
- name: Build release - run: |
env:
CARGO_PROFILE_RELEASE_LTO: "fat"
CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1"
CARGO_PROFILE_RELEASE_PANIC: "abort"
run: |
set -euo pipefail
cargo zigbuild --release --locked --target "${{ matrix.zig_target }}" cargo zigbuild --release --locked --target "${{ matrix.zig_target }}"
- name: Debug target dir (optional but useful) - run: |
run: | BIN="target/${{ matrix.rust_target }}/release/${BINARY_NAME}"
set -euo pipefail llvm-strip "$BIN" || true
ls -lah "target/${{ matrix.rust_target }}/release/" || true
- name: Strip binary - run: |
run: | BIN="target/${{ matrix.rust_target }}/release/${BINARY_NAME}"
set -euo pipefail OUT="$RUNNER_TEMP/${{ matrix.asset_name }}"
llvm-strip "target/${{ matrix.rust_target }}/release/${BINARY_NAME}" || true mkdir -p "$OUT"
install -m755 "$BIN" "$OUT/${BINARY_NAME}"
- name: Inspect binary tar -C "$RUNNER_TEMP" -czf "${{ matrix.asset_name }}.tar.gz" "${{ matrix.asset_name }}"
run: | sha256sum "${{ matrix.asset_name }}.tar.gz" > "${{ matrix.asset_name }}.sha256"
set -euo pipefail
file "target/${{ matrix.rust_target }}/release/${BINARY_NAME}"
- name: Package
run: |
set -euo pipefail
BIN_PATH="target/${{ matrix.rust_target }}/release/${BINARY_NAME}"
test -f "$BIN_PATH"
OUTDIR="$RUNNER_TEMP/pkg/${{ matrix.asset_name }}"
mkdir -p "$OUTDIR"
install -m 0755 "$BIN_PATH" "$OUTDIR/${BINARY_NAME}"
if [[ -f LICENSE ]]; then cp LICENSE "$OUTDIR/"; fi
if [[ -f README.md ]]; then cp README.md "$OUTDIR/"; fi
cat > "$OUTDIR/BUILD-INFO.txt" <<EOF
project=${GITHUB_REPOSITORY}
version=${{ needs.prepare.outputs.version }}
git_ref=${GITHUB_REF}
git_sha=${GITHUB_SHA}
rust_target=${{ matrix.rust_target }}
zig_target=${{ matrix.zig_target }}
built_at=$(date -u +%Y-%m-%dT%H:%M:%SZ)
EOF
mkdir -p dist
tar -C "$RUNNER_TEMP/pkg" -czf "dist/${{ matrix.asset_name }}.tar.gz" "${{ matrix.asset_name }}"
sha256sum "dist/${{ matrix.asset_name }}.tar.gz" > "dist/${{ matrix.asset_name }}.sha256"
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v4
with: with:
name: ${{ matrix.asset_name }} name: ${{ matrix.asset_name }}
path: | path: |
dist/${{ matrix.asset_name }}.tar.gz ${{ matrix.asset_name }}.tar.gz
dist/${{ matrix.asset_name }}.sha256 ${{ matrix.asset_name }}.sha256
if-no-files-found: error
retention-days: 14
attest-binaries:
name: Attest binary archives
needs: build-binaries
runs-on: ubuntu-latest
permissions:
contents: read
attestations: write
id-token: write
steps:
- uses: actions/download-artifact@v4
with:
path: dist
- name: Flatten artifacts
run: |
set -euo pipefail
mkdir -p upload
find dist -type f \( -name '*.tar.gz' -o -name '*.sha256' \) -exec cp {} upload/ \;
ls -lah upload
- name: Attest release archives
uses: actions/attest-build-provenance@v3
with:
subject-path: 'upload/*.tar.gz'
docker-image: docker-image:
name: Build and push GHCR image name: Docker ${{ matrix.platform }}
needs: [prepare, checks] needs: [prepare, build-binaries]
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: read strategy:
packages: write matrix:
include:
- platform: linux/amd64
artifact: telemt-x86_64-linux-gnu
- platform: linux/arm64
artifact: telemt-aarch64-linux-gnu
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- uses: docker/setup-qemu-action@v3 - uses: actions/download-artifact@v4
with:
name: ${{ matrix.artifact }}
path: dist
- run: |
mkdir docker-build
tar -xzf dist/*.tar.gz -C docker-build --strip-components=1
- uses: docker/setup-buildx-action@v3 - uses: docker/setup-buildx-action@v3
- name: Log in to GHCR - name: Login
if: ${{ needs.prepare.outputs.release_enabled == 'true' }} if: ${{ needs.prepare.outputs.release_enabled == 'true' }}
uses: docker/login-action@v3 uses: docker/login-action@v3
with: with:
@ -282,42 +181,20 @@ jobs:
username: ${{ github.actor }} username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
- name: Docker metadata - uses: docker/build-push-action@v6
id: meta
uses: docker/metadata-action@v5
with: with:
images: ghcr.io/${{ github.repository }} context: ./docker-build
tags: | platforms: ${{ matrix.platform }}
type=raw,value=${{ needs.prepare.outputs.version }}
type=raw,value=latest,enable=${{ needs.prepare.outputs.prerelease != 'true' && needs.prepare.outputs.release_enabled == 'true' }}
labels: |
org.opencontainers.image.title=telemt
org.opencontainers.image.description=telemt
org.opencontainers.image.source=https://github.com/${{ github.repository }}
org.opencontainers.image.version=${{ needs.prepare.outputs.version }}
org.opencontainers.image.revision=${{ github.sha }}
- name: Build and push
uses: docker/build-push-action@v6
with:
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ needs.prepare.outputs.release_enabled == 'true' }} push: ${{ needs.prepare.outputs.release_enabled == 'true' }}
tags: ${{ steps.meta.outputs.tags }} tags: ghcr.io/${{ github.repository }}:${{ needs.prepare.outputs.version }}
labels: ${{ steps.meta.outputs.labels }} cache-from: type=gha,scope=telemt-${{ matrix.platform }}
cache-from: type=gha cache-to: type=gha,mode=max,scope=telemt-${{ matrix.platform }}
cache-to: type=gha,mode=max provenance: false
provenance: mode=max sbom: false
sbom: true
build-args: |
TELEMT_VERSION=${{ needs.prepare.outputs.version }}
VCS_REF=${{ github.sha }}
release: release:
name: Create GitHub Release
if: ${{ needs.prepare.outputs.release_enabled == 'true' }} if: ${{ needs.prepare.outputs.release_enabled == 'true' }}
needs: [prepare, build-binaries, attest-binaries, docker-image] needs: [prepare, build-binaries]
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
contents: write contents: write
@ -326,18 +203,14 @@ jobs:
- uses: actions/download-artifact@v4 - uses: actions/download-artifact@v4
with: with:
path: release-artifacts path: release-artifacts
pattern: telemt-*
- name: Flatten artifacts - run: |
run: | mkdir upload
set -euo pipefail
mkdir -p upload
find release-artifacts -type f \( -name '*.tar.gz' -o -name '*.sha256' \) -exec cp {} upload/ \; find release-artifacts -type f \( -name '*.tar.gz' -o -name '*.sha256' \) -exec cp {} upload/ \;
ls -lah upload
- name: Create release - uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v2
with: with:
files: upload/* files: upload/*
generate_release_notes: true generate_release_notes: true
draft: false
prerelease: ${{ needs.prepare.outputs.prerelease == 'true' }} prerelease: ${{ needs.prepare.outputs.prerelease == 'true' }}

4
Cargo.lock generated
View File

@ -90,9 +90,9 @@ checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
[[package]] [[package]]
name = "arc-swap" name = "arc-swap"
version = "1.8.2" version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" checksum = "a07d1f37ff60921c83bdfc7407723bdefe89b44b98a9b772f225c8f9d67141a6"
dependencies = [ dependencies = [
"rustversion", "rustversion",
] ]

View File

@ -27,7 +27,7 @@ static_assertions = "1.1"
# Network # Network
socket2 = { version = "0.6", features = ["all"] } socket2 = { version = "0.6", features = ["all"] }
nix = { version = "0.31", default-features = false, features = ["net"] } nix = { version = "0.31", default-features = false, features = ["net", "fs"] }
shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] } shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] }
# Serialization # Serialization

View File

@ -261,6 +261,7 @@ This document lists all configuration keys accepted by `config.toml`.
| alpn_enforce | `bool` | `true` | — | Enforces ALPN echo behavior based on client preference. | | alpn_enforce | `bool` | `true` | — | Enforces ALPN echo behavior based on client preference. |
| mask_proxy_protocol | `u8` | `0` | — | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). | | mask_proxy_protocol | `u8` | `0` | — | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). |
| mask_shape_hardening | `bool` | `true` | — | Enables client->mask shape-channel hardening by applying controlled tail padding to bucket boundaries on mask relay shutdown. | | mask_shape_hardening | `bool` | `true` | — | Enables client->mask shape-channel hardening by applying controlled tail padding to bucket boundaries on mask relay shutdown. |
| mask_shape_hardening_aggressive_mode | `bool` | `false` | Requires `mask_shape_hardening = true`. | Opt-in aggressive shaping profile: allows shaping on backend-silent non-EOF paths and switches above-cap blur to strictly positive random tail. |
| mask_shape_bucket_floor_bytes | `usize` | `512` | Must be `> 0`; should be `<= mask_shape_bucket_cap_bytes`. | Minimum bucket size used by shape-channel hardening. | | mask_shape_bucket_floor_bytes | `usize` | `512` | Must be `> 0`; should be `<= mask_shape_bucket_cap_bytes`. | Minimum bucket size used by shape-channel hardening. |
| mask_shape_bucket_cap_bytes | `usize` | `4096` | Must be `>= mask_shape_bucket_floor_bytes`. | Maximum bucket size used by shape-channel hardening; traffic above cap is not padded further. | | mask_shape_bucket_cap_bytes | `usize` | `4096` | Must be `>= mask_shape_bucket_floor_bytes`. | Maximum bucket size used by shape-channel hardening; traffic above cap is not padded further. |
| mask_shape_above_cap_blur | `bool` | `false` | Requires `mask_shape_hardening = true`; requires `mask_shape_above_cap_blur_max_bytes > 0`. | Adds bounded randomized tail bytes even when forwarded size already exceeds cap. | | mask_shape_above_cap_blur | `bool` | `false` | Requires `mask_shape_hardening = true`; requires `mask_shape_above_cap_blur_max_bytes > 0`. | Adds bounded randomized tail bytes even when forwarded size already exceeds cap. |
@ -284,6 +285,27 @@ When `mask_shape_hardening = true`, Telemt pads the **client->mask** stream tail
This means multiple nearby probe sizes collapse into the same backend-observed size class, making active classification harder. This means multiple nearby probe sizes collapse into the same backend-observed size class, making active classification harder.
What each parameter changes in practice:
- `mask_shape_hardening`
Enables or disables this entire length-shaping stage on the fallback path.
When `false`, backend-observed length stays close to the real forwarded probe length.
When `true`, clean relay shutdown can append random padding bytes to move the total into a bucket.
- `mask_shape_bucket_floor_bytes`
Sets the first bucket boundary used for small probes.
Example: with floor `512`, a malformed probe that would otherwise forward `37` bytes can be expanded to `512` bytes on clean EOF.
Larger floor values hide very small probes better, but increase egress cost.
- `mask_shape_bucket_cap_bytes`
Sets the largest bucket Telemt will pad up to with bucket logic.
Example: with cap `4096`, a forwarded total of `1800` bytes may be padded to `2048` or `4096` depending on the bucket ladder, but a total already above `4096` will not be bucket-padded further.
Larger cap values increase the range over which size classes are collapsed, but also increase worst-case overhead.
- Clean EOF matters in conservative mode
In the default profile, shape padding is intentionally conservative: it is applied on clean relay shutdown, not on every timeout/drip path.
This avoids introducing new timeout-tail artifacts that some backends or tests interpret as a separate fingerprint.
Practical trade-offs: Practical trade-offs:
- Better anti-fingerprinting on size/shape channel. - Better anti-fingerprinting on size/shape channel.
@ -296,14 +318,56 @@ Recommended starting profile:
- `mask_shape_bucket_floor_bytes = 512` - `mask_shape_bucket_floor_bytes = 512`
- `mask_shape_bucket_cap_bytes = 4096` - `mask_shape_bucket_cap_bytes = 4096`
### Aggressive mode notes (`[censorship]`)
`mask_shape_hardening_aggressive_mode` is an opt-in profile for higher anti-classifier pressure.
- Default is `false` to preserve conservative timeout/no-tail behavior.
- Requires `mask_shape_hardening = true`.
- When enabled, backend-silent non-EOF masking paths may be shaped.
- When enabled together with above-cap blur, the random extra tail uses `[1, max]` instead of `[0, max]`.
What changes when aggressive mode is enabled:
- Backend-silent timeout paths can be shaped
In default mode, a client that keeps the socket half-open and times out will usually not receive shape padding on that path.
In aggressive mode, Telemt may still shape that backend-silent session if no backend bytes were returned.
This is specifically aimed at active probes that try to avoid EOF in order to preserve an exact backend-observed length.
- Above-cap blur always adds at least one byte
In default mode, above-cap blur may choose `0`, so some oversized probes still land on their exact base forwarded length.
In aggressive mode, that exact-base sample is removed by construction.
- Tradeoff
Aggressive mode improves resistance to active length classifiers, but it is more opinionated and less conservative.
If your deployment prioritizes strict compatibility with timeout/no-tail semantics, leave it disabled.
If your threat model includes repeated active probing by a censor, this mode is the stronger profile.
Use this mode only when your threat model prioritizes classifier resistance over strict compatibility with conservative masking semantics.
### Above-cap blur notes (`[censorship]`) ### Above-cap blur notes (`[censorship]`)
`mask_shape_above_cap_blur` adds a second-stage blur for very large probes that are already above `mask_shape_bucket_cap_bytes`. `mask_shape_above_cap_blur` adds a second-stage blur for very large probes that are already above `mask_shape_bucket_cap_bytes`.
- A random tail in `[0, mask_shape_above_cap_blur_max_bytes]` is appended. - A random tail in `[0, mask_shape_above_cap_blur_max_bytes]` is appended in default mode.
- In aggressive mode, the random tail becomes strictly positive: `[1, mask_shape_above_cap_blur_max_bytes]`.
- This reduces exact-size leakage above cap at bounded overhead. - This reduces exact-size leakage above cap at bounded overhead.
- Keep `mask_shape_above_cap_blur_max_bytes` conservative to avoid unnecessary egress growth. - Keep `mask_shape_above_cap_blur_max_bytes` conservative to avoid unnecessary egress growth.
Operational meaning:
- Without above-cap blur
A probe that forwards `5005` bytes will still look like `5005` bytes to the backend if it is already above cap.
- With above-cap blur enabled
That same probe may look like any value in a bounded window above its base length.
Example with `mask_shape_above_cap_blur_max_bytes = 64`:
backend-observed size becomes `5005..5069` in default mode, or `5006..5069` in aggressive mode.
- Choosing `mask_shape_above_cap_blur_max_bytes`
Small values reduce cost but preserve more separability between far-apart oversized classes.
Larger values blur oversized classes more aggressively, but add more egress overhead and more output variance.
### Timing normalization envelope notes (`[censorship]`) ### Timing normalization envelope notes (`[censorship]`)
`mask_timing_normalization_enabled` smooths timing differences between masking outcomes by applying a target duration envelope. `mask_timing_normalization_enabled` smooths timing differences between masking outcomes by applying a target duration envelope.

View File

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments)]
use std::convert::Infallible; use std::convert::Infallible;
use std::net::{IpAddr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use std::path::PathBuf; use std::path::PathBuf;

View File

@ -495,11 +495,11 @@ fn resolve_link_hosts(
push_unique_host(&mut hosts, host); push_unique_host(&mut hosts, host);
continue; continue;
} }
if let Some(ip) = listener.announce_ip { if let Some(ip) = listener.announce_ip
if !ip.is_unspecified() { && !ip.is_unspecified()
push_unique_host(&mut hosts, &ip.to_string()); {
continue; push_unique_host(&mut hosts, &ip.to_string());
} continue;
} }
if listener.ip.is_unspecified() { if listener.ip.is_unspecified() {
let detected_ip = if listener.ip.is_ipv4() { let detected_ip = if listener.ip.is_ipv4() {

View File

@ -523,6 +523,10 @@ pub(crate) fn default_mask_shape_hardening() -> bool {
true true
} }
pub(crate) fn default_mask_shape_hardening_aggressive_mode() -> bool {
false
}
pub(crate) fn default_mask_shape_bucket_floor_bytes() -> usize { pub(crate) fn default_mask_shape_bucket_floor_bytes() -> usize {
512 512
} }

View File

@ -406,6 +406,15 @@ impl ProxyConfig {
)); ));
} }
if config.censorship.mask_shape_hardening_aggressive_mode
&& !config.censorship.mask_shape_hardening
{
return Err(ProxyError::Config(
"censorship.mask_shape_hardening_aggressive_mode requires censorship.mask_shape_hardening = true"
.to_string(),
));
}
if config.censorship.mask_shape_above_cap_blur if config.censorship.mask_shape_above_cap_blur
&& config.censorship.mask_shape_above_cap_blur_max_bytes == 0 && config.censorship.mask_shape_above_cap_blur_max_bytes == 0
{ {

View File

@ -194,3 +194,45 @@ mask_timing_normalization_ceiling_ms = 240
remove_temp_config(&path); remove_temp_config(&path);
} }
#[test]
fn load_rejects_aggressive_shape_mode_when_shape_hardening_disabled() {
let path = write_temp_config(
r#"
[censorship]
mask_shape_hardening = false
mask_shape_hardening_aggressive_mode = true
"#,
);
let err = ProxyConfig::load(&path)
.expect_err("aggressive shape hardening mode must require shape hardening enabled");
let msg = err.to_string();
assert!(
msg.contains("censorship.mask_shape_hardening_aggressive_mode requires censorship.mask_shape_hardening = true"),
"error must explain aggressive-mode prerequisite, got: {msg}"
);
remove_temp_config(&path);
}
#[test]
fn load_accepts_aggressive_shape_mode_when_shape_hardening_enabled() {
let path = write_temp_config(
r#"
[censorship]
mask_shape_hardening = true
mask_shape_hardening_aggressive_mode = true
mask_shape_above_cap_blur = true
mask_shape_above_cap_blur_max_bytes = 8
"#,
);
let cfg = ProxyConfig::load(&path)
.expect("aggressive shape hardening mode should be accepted when prerequisites are met");
assert!(cfg.censorship.mask_shape_hardening);
assert!(cfg.censorship.mask_shape_hardening_aggressive_mode);
assert!(cfg.censorship.mask_shape_above_cap_blur);
remove_temp_config(&path);
}

View File

@ -1417,6 +1417,12 @@ pub struct AntiCensorshipConfig {
#[serde(default = "default_mask_shape_hardening")] #[serde(default = "default_mask_shape_hardening")]
pub mask_shape_hardening: bool, pub mask_shape_hardening: bool,
/// Opt-in aggressive shape hardening mode.
/// When enabled, masking may shape some backend-silent timeout paths and
/// enforces strictly positive above-cap blur when blur is enabled.
#[serde(default = "default_mask_shape_hardening_aggressive_mode")]
pub mask_shape_hardening_aggressive_mode: bool,
/// Minimum bucket size for mask shape hardening padding. /// Minimum bucket size for mask shape hardening padding.
#[serde(default = "default_mask_shape_bucket_floor_bytes")] #[serde(default = "default_mask_shape_bucket_floor_bytes")]
pub mask_shape_bucket_floor_bytes: usize, pub mask_shape_bucket_floor_bytes: usize,
@ -1467,6 +1473,7 @@ impl Default for AntiCensorshipConfig {
alpn_enforce: default_alpn_enforce(), alpn_enforce: default_alpn_enforce(),
mask_proxy_protocol: 0, mask_proxy_protocol: 0,
mask_shape_hardening: default_mask_shape_hardening(), mask_shape_hardening: default_mask_shape_hardening(),
mask_shape_hardening_aggressive_mode: default_mask_shape_hardening_aggressive_mode(),
mask_shape_bucket_floor_bytes: default_mask_shape_bucket_floor_bytes(), mask_shape_bucket_floor_bytes: default_mask_shape_bucket_floor_bytes(),
mask_shape_bucket_cap_bytes: default_mask_shape_bucket_cap_bytes(), mask_shape_bucket_cap_bytes: default_mask_shape_bucket_cap_bytes(),
mask_shape_above_cap_blur: default_mask_shape_above_cap_blur(), mask_shape_above_cap_blur: default_mask_shape_above_cap_blur(),

View File

@ -22,7 +22,7 @@ pub struct UserIpTracker {
limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>, limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>,
limit_window: Arc<RwLock<Duration>>, limit_window: Arc<RwLock<Duration>>,
last_compact_epoch_secs: Arc<AtomicU64>, last_compact_epoch_secs: Arc<AtomicU64>,
pub(crate) cleanup_queue: Arc<Mutex<Vec<(String, IpAddr)>>>, cleanup_queue: Arc<Mutex<Vec<(String, IpAddr)>>>,
cleanup_drain_lock: Arc<AsyncMutex<()>>, cleanup_drain_lock: Arc<AsyncMutex<()>>,
} }
@ -57,6 +57,19 @@ impl UserIpTracker {
} }
} }
#[cfg(test)]
pub(crate) fn cleanup_queue_len_for_tests(&self) -> usize {
self.cleanup_queue
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
.len()
}
#[cfg(test)]
pub(crate) fn cleanup_queue_mutex_for_tests(&self) -> Arc<Mutex<Vec<(String, IpAddr)>>> {
Arc::clone(&self.cleanup_queue)
}
pub(crate) async fn drain_cleanup_queue(&self) { pub(crate) async fn drain_cleanup_queue(&self) {
// Serialize queue draining and active-IP mutation so check-and-add cannot // Serialize queue draining and active-IP mutation so check-and-add cannot
// observe stale active entries that are already queued for removal. // observe stale active entries that are already queued for removal.

View File

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments)]
use std::sync::Arc; use std::sync::Arc;
use std::time::Instant; use std::time::Instant;

View File

@ -1,3 +1,5 @@
#![allow(clippy::items_after_test_module)]
use std::path::PathBuf; use std::path::PathBuf;
use std::time::Duration; use std::time::Duration;

View File

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments)]
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;

View File

@ -10,6 +10,9 @@ mod ip_tracker;
#[path = "tests/ip_tracker_hotpath_adversarial_tests.rs"] #[path = "tests/ip_tracker_hotpath_adversarial_tests.rs"]
mod ip_tracker_hotpath_adversarial_tests; mod ip_tracker_hotpath_adversarial_tests;
#[cfg(test)] #[cfg(test)]
#[path = "tests/ip_tracker_encapsulation_adversarial_tests.rs"]
mod ip_tracker_encapsulation_adversarial_tests;
#[cfg(test)]
#[path = "tests/ip_tracker_regression_tests.rs"] #[path = "tests/ip_tracker_regression_tests.rs"]
mod ip_tracker_regression_tests; mod ip_tracker_regression_tests;
mod maestro; mod maestro;

View File

@ -1,4 +1,5 @@
#![allow(dead_code)] #![allow(dead_code)]
#![allow(clippy::items_after_test_module)]
use std::collections::HashMap; use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
@ -197,11 +198,10 @@ pub async fn run_probe(
if nat_probe if nat_probe
&& probe.reflected_ipv4.is_none() && probe.reflected_ipv4.is_none()
&& probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false) && probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false)
&& let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await
{ {
if let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await { probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0)); info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
}
} }
probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) { probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) {
@ -286,8 +286,6 @@ async fn probe_stun_servers_parallel(
while next_idx < servers.len() && join_set.len() < concurrency { while next_idx < servers.len() && join_set.len() < concurrency {
let stun_addr = servers[next_idx].clone(); let stun_addr = servers[next_idx].clone();
next_idx += 1; next_idx += 1;
let bind_v4 = bind_v4;
let bind_v6 = bind_v6;
join_set.spawn(async move { join_set.spawn(async move {
let res = timeout(STUN_BATCH_TIMEOUT, async { let res = timeout(STUN_BATCH_TIMEOUT, async {
let v4 = stun_probe_family_with_bind(&stun_addr, IpFamily::V4, bind_v4).await?; let v4 = stun_probe_family_with_bind(&stun_addr, IpFamily::V4, bind_v4).await?;

View File

@ -0,0 +1,37 @@
use super::*;
#[test]
fn extension_builder_fails_closed_on_u16_length_overflow() {
let builder = TlsExtensionBuilder {
extensions: vec![0u8; (u16::MAX as usize) + 1],
};
let built = builder.build();
assert!(
built.is_empty(),
"oversized extension blob must fail closed instead of truncating length field"
);
}
#[test]
fn server_hello_builder_fails_closed_on_session_id_len_overflow() {
let builder = ServerHelloBuilder {
random: [0u8; 32],
session_id: vec![0xAB; (u8::MAX as usize) + 1],
cipher_suite: cipher_suite::TLS_AES_128_GCM_SHA256,
compression: 0,
extensions: TlsExtensionBuilder::new(),
};
let message = builder.build_message();
let record = builder.build_record();
assert!(
message.is_empty(),
"session_id length overflow must fail closed in message builder"
);
assert!(
record.is_empty(),
"session_id length overflow must fail closed in record builder"
);
}

View File

@ -5,6 +5,61 @@
//! actually carries MTProto authentication data. //! actually carries MTProto authentication data.
#![allow(dead_code)] #![allow(dead_code)]
#![cfg_attr(not(test), forbid(clippy::undocumented_unsafe_blocks))]
#![cfg_attr(
not(test),
deny(
clippy::unwrap_used,
clippy::expect_used,
clippy::panic,
clippy::todo,
clippy::unimplemented,
clippy::correctness,
clippy::option_if_let_else,
clippy::or_fun_call,
clippy::branches_sharing_code,
clippy::single_option_map,
clippy::useless_let_if_seq,
clippy::redundant_locals,
clippy::cloned_ref_to_slice_refs,
unsafe_code,
clippy::await_holding_lock,
clippy::await_holding_refcell_ref,
clippy::debug_assert_with_mut_call,
clippy::macro_use_imports,
clippy::cast_ptr_alignment,
clippy::cast_lossless,
clippy::ptr_as_ptr,
clippy::large_stack_arrays,
clippy::same_functions_in_if_condition,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
rust_2018_idioms
)
)]
#![cfg_attr(
not(test),
allow(
clippy::use_self,
clippy::redundant_closure,
clippy::too_many_arguments,
clippy::doc_markdown,
clippy::missing_const_for_fn,
clippy::unnecessary_operation,
clippy::redundant_pub_crate,
clippy::derive_partial_eq_without_eq,
clippy::type_complexity,
clippy::new_ret_no_self,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::significant_drop_tightening,
clippy::significant_drop_in_scrutinee,
clippy::float_cmp,
clippy::nursery
)
)]
use super::constants::*; use super::constants::*;
use crate::crypto::{SecureRandom, sha256_hmac}; use crate::crypto::{SecureRandom, sha256_hmac};
@ -127,12 +182,13 @@ impl TlsExtensionBuilder {
} }
/// Build final extensions with length prefix /// Build final extensions with length prefix
fn build(self) -> Vec<u8> { fn build(self) -> Vec<u8> {
let Ok(len) = u16::try_from(self.extensions.len()) else {
return Vec::new();
};
let mut result = Vec::with_capacity(2 + self.extensions.len()); let mut result = Vec::with_capacity(2 + self.extensions.len());
// Extensions length (2 bytes) // Extensions length (2 bytes)
let len = self.extensions.len() as u16;
result.extend_from_slice(&len.to_be_bytes()); result.extend_from_slice(&len.to_be_bytes());
// Extensions data // Extensions data
@ -142,7 +198,6 @@ impl TlsExtensionBuilder {
} }
/// Get current extensions without length prefix (for calculation) /// Get current extensions without length prefix (for calculation)
fn as_bytes(&self) -> &[u8] { fn as_bytes(&self) -> &[u8] {
&self.extensions &self.extensions
} }
@ -188,8 +243,13 @@ impl ServerHelloBuilder {
/// Build ServerHello message (without record header) /// Build ServerHello message (without record header)
fn build_message(&self) -> Vec<u8> { fn build_message(&self) -> Vec<u8> {
let Ok(session_id_len) = u8::try_from(self.session_id.len()) else {
return Vec::new();
};
let extensions = self.extensions.extensions.clone(); let extensions = self.extensions.extensions.clone();
let extensions_len = extensions.len() as u16; let Ok(extensions_len) = u16::try_from(extensions.len()) else {
return Vec::new();
};
// Calculate total length // Calculate total length
let body_len = 2 + // version let body_len = 2 + // version
@ -198,6 +258,9 @@ impl ServerHelloBuilder {
2 + // cipher suite 2 + // cipher suite
1 + // compression 1 + // compression
2 + extensions.len(); // extensions length + data 2 + extensions.len(); // extensions length + data
if body_len > 0x00ff_ffff {
return Vec::new();
}
let mut message = Vec::with_capacity(4 + body_len); let mut message = Vec::with_capacity(4 + body_len);
@ -205,7 +268,10 @@ impl ServerHelloBuilder {
message.push(0x02); // ServerHello message type message.push(0x02); // ServerHello message type
// 3-byte length // 3-byte length
let len_bytes = (body_len as u32).to_be_bytes(); let Ok(body_len_u32) = u32::try_from(body_len) else {
return Vec::new();
};
let len_bytes = body_len_u32.to_be_bytes();
message.extend_from_slice(&len_bytes[1..4]); message.extend_from_slice(&len_bytes[1..4]);
// Server version (TLS 1.2 in header, actual version in extension) // Server version (TLS 1.2 in header, actual version in extension)
@ -215,7 +281,7 @@ impl ServerHelloBuilder {
message.extend_from_slice(&self.random); message.extend_from_slice(&self.random);
// Session ID // Session ID
message.push(self.session_id.len() as u8); message.push(session_id_len);
message.extend_from_slice(&self.session_id); message.extend_from_slice(&self.session_id);
// Cipher suite // Cipher suite
@ -236,13 +302,19 @@ impl ServerHelloBuilder {
/// Build complete ServerHello TLS record /// Build complete ServerHello TLS record
fn build_record(&self) -> Vec<u8> { fn build_record(&self) -> Vec<u8> {
let message = self.build_message(); let message = self.build_message();
if message.is_empty() {
return Vec::new();
}
let Ok(message_len) = u16::try_from(message.len()) else {
return Vec::new();
};
let mut record = Vec::with_capacity(5 + message.len()); let mut record = Vec::with_capacity(5 + message.len());
// TLS record header // TLS record header
record.push(TLS_RECORD_HANDSHAKE); record.push(TLS_RECORD_HANDSHAKE);
record.extend_from_slice(&TLS_VERSION); record.extend_from_slice(&TLS_VERSION);
record.extend_from_slice(&(message.len() as u16).to_be_bytes()); record.extend_from_slice(&message_len.to_be_bytes());
// Message // Message
record.extend_from_slice(&message); record.extend_from_slice(&message);
@ -258,7 +330,6 @@ impl ServerHelloBuilder {
/// Returns validation result if a matching user is found. /// Returns validation result if a matching user is found.
/// The result **must** be used — ignoring it silently bypasses authentication. /// The result **must** be used — ignoring it silently bypasses authentication.
#[must_use] #[must_use]
pub fn validate_tls_handshake( pub fn validate_tls_handshake(
handshake: &[u8], handshake: &[u8],
secrets: &[(String, Vec<u8>)], secrets: &[(String, Vec<u8>)],
@ -628,11 +699,10 @@ pub fn extract_sni_from_client_hello(handshake: &[u8]) -> Option<String> {
if name_type == 0 if name_type == 0
&& name_len > 0 && name_len > 0
&& let Ok(host) = std::str::from_utf8(&handshake[sn_pos..sn_pos + name_len]) && let Ok(host) = std::str::from_utf8(&handshake[sn_pos..sn_pos + name_len])
&& is_valid_sni_hostname(host)
{ {
if is_valid_sni_hostname(host) { extracted_sni = Some(host.to_string());
extracted_sni = Some(host.to_string()); break;
break;
}
} }
sn_pos += name_len; sn_pos += name_len;
} }
@ -754,7 +824,6 @@ pub fn is_tls_handshake(first_bytes: &[u8]) -> bool {
} }
/// Parse TLS record header, returns (record_type, length) /// Parse TLS record header, returns (record_type, length)
pub fn parse_tls_record_header(header: &[u8; 5]) -> Option<(u8, u16)> { pub fn parse_tls_record_header(header: &[u8; 5]) -> Option<(u8, u16)> {
let record_type = header[0]; let record_type = header[0];
let version = [header[1], header[2]]; let version = [header[1], header[2]];
@ -860,3 +929,7 @@ mod adversarial_tests;
#[cfg(test)] #[cfg(test)]
#[path = "tests/tls_fuzz_security_tests.rs"] #[path = "tests/tls_fuzz_security_tests.rs"]
mod fuzz_security_tests; mod fuzz_security_tests;
#[cfg(test)]
#[path = "tests/tls_length_cast_hardening_security_tests.rs"]
mod length_cast_hardening_security_tests;

View File

@ -1,3 +1,8 @@
#![allow(dead_code)]
// Adaptive buffer policy is staged and retained for deterministic rollout.
// Keep definitions compiled for compatibility and security test scaffolding.
use dashmap::DashMap; use dashmap::DashMap;
use std::cmp::max; use std::cmp::max;
use std::sync::OnceLock; use std::sync::OnceLock;

View File

@ -116,11 +116,23 @@ fn beobachten_ttl(config: &ProxyConfig) -> Duration {
} }
fn wrap_tls_application_record(payload: &[u8]) -> Vec<u8> { fn wrap_tls_application_record(payload: &[u8]) -> Vec<u8> {
let mut record = Vec::with_capacity(5 + payload.len()); let chunks = payload.len().div_ceil(u16::MAX as usize).max(1);
record.push(TLS_RECORD_APPLICATION); let mut record = Vec::with_capacity(payload.len() + 5 * chunks);
record.extend_from_slice(&TLS_VERSION);
record.extend_from_slice(&(payload.len() as u16).to_be_bytes()); if payload.is_empty() {
record.extend_from_slice(payload); record.push(TLS_RECORD_APPLICATION);
record.extend_from_slice(&TLS_VERSION);
record.extend_from_slice(&0u16.to_be_bytes());
return record;
}
for chunk in payload.chunks(u16::MAX as usize) {
record.push(TLS_RECORD_APPLICATION);
record.extend_from_slice(&TLS_VERSION);
record.extend_from_slice(&(chunk.len() as u16).to_be_bytes());
record.extend_from_slice(chunk);
}
record record
} }
@ -1312,3 +1324,7 @@ mod masking_probe_evasion_blackhat_tests;
#[cfg(test)] #[cfg(test)]
#[path = "tests/client_beobachten_ttl_bounds_security_tests.rs"] #[path = "tests/client_beobachten_ttl_bounds_security_tests.rs"]
mod beobachten_ttl_bounds_security_tests; mod beobachten_ttl_bounds_security_tests;
#[cfg(test)]
#[path = "tests/client_tls_record_wrap_hardening_security_tests.rs"]
mod tls_record_wrap_hardening_security_tests;

View File

@ -24,13 +24,13 @@ use crate::proxy::route_mode::{
use crate::stats::Stats; use crate::stats::Stats;
use crate::stream::{BufferPool, CryptoReader, CryptoWriter}; use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
use crate::transport::UpstreamManager; use crate::transport::UpstreamManager;
#[cfg(unix)]
use nix::fcntl::{Flock, FlockArg, OFlag, openat};
#[cfg(unix)]
use nix::sys::stat::Mode;
#[cfg(unix)]
use std::os::unix::ffi::OsStrExt;
#[cfg(unix)] #[cfg(unix)]
use std::os::unix::fs::OpenOptionsExt; use std::os::unix::fs::OpenOptionsExt;
#[cfg(unix)]
use std::os::unix::io::{AsRawFd, FromRawFd};
const UNKNOWN_DC_LOG_DISTINCT_LIMIT: usize = 1024; const UNKNOWN_DC_LOG_DISTINCT_LIMIT: usize = 1024;
static LOGGED_UNKNOWN_DCS: OnceLock<Mutex<HashSet<i16>>> = OnceLock::new(); static LOGGED_UNKNOWN_DCS: OnceLock<Mutex<HashSet<i16>>> = OnceLock::new();
@ -170,32 +170,16 @@ fn open_unknown_dc_log_append_anchored(
.custom_flags(libc::O_DIRECTORY | libc::O_NOFOLLOW | libc::O_CLOEXEC) .custom_flags(libc::O_DIRECTORY | libc::O_NOFOLLOW | libc::O_CLOEXEC)
.open(&path.allowed_parent)?; .open(&path.allowed_parent)?;
let file_name = let oflags = OFlag::O_CREAT
std::ffi::CString::new(path.file_name.as_os_str().as_bytes()).map_err(|_| { | OFlag::O_APPEND
std::io::Error::new( | OFlag::O_WRONLY
std::io::ErrorKind::InvalidInput, | OFlag::O_NOFOLLOW
"unknown DC log file name contains NUL byte", | OFlag::O_CLOEXEC;
) let mode = Mode::from_bits_truncate(0o600);
})?; let path_component = Path::new(path.file_name.as_os_str());
let fd = openat(&parent, path_component, oflags, mode)
let fd = unsafe { .map_err(|err| std::io::Error::from_raw_os_error(err as i32))?;
libc::openat( let file = std::fs::File::from(fd);
parent.as_raw_fd(),
file_name.as_ptr(),
libc::O_CREAT
| libc::O_APPEND
| libc::O_WRONLY
| libc::O_NOFOLLOW
| libc::O_CLOEXEC,
0o600,
)
};
if fd < 0 {
return Err(std::io::Error::last_os_error());
}
let file = unsafe { std::fs::File::from_raw_fd(fd) };
Ok(file) Ok(file)
} }
#[cfg(not(unix))] #[cfg(not(unix))]
@ -211,16 +195,13 @@ fn open_unknown_dc_log_append_anchored(
fn append_unknown_dc_line(file: &mut std::fs::File, dc_idx: i16) -> std::io::Result<()> { fn append_unknown_dc_line(file: &mut std::fs::File, dc_idx: i16) -> std::io::Result<()> {
#[cfg(unix)] #[cfg(unix)]
{ {
if unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_EX) } != 0 { let cloned = file.try_clone()?;
return Err(std::io::Error::last_os_error()); let mut locked = Flock::lock(cloned, FlockArg::LockExclusive)
} .map_err(|(_, err)| std::io::Error::from_raw_os_error(err as i32))?;
let write_result = writeln!(&mut *locked, "dc_idx={dc_idx}");
let write_result = writeln!(file, "dc_idx={dc_idx}"); let _ = locked
.unlock()
if unsafe { libc::flock(file.as_raw_fd(), libc::LOCK_UN) } != 0 { .map_err(|(_, err)| std::io::Error::from_raw_os_error(err as i32))?;
return Err(std::io::Error::last_os_error());
}
write_result write_result
} }
#[cfg(not(unix))] #[cfg(not(unix))]

View File

@ -626,7 +626,7 @@ where
let cached = if config.censorship.tls_emulation { let cached = if config.censorship.tls_emulation {
if let Some(cache) = tls_cache.as_ref() { if let Some(cache) = tls_cache.as_ref() {
let selected_domain = if let Some(sni) = client_sni.as_ref() { let selected_domain = if let Some(sni) = client_sni.as_ref() {
if cache.contains_domain(&sni).await { if cache.contains_domain(sni).await {
sni.clone() sni.clone()
} else { } else {
config.censorship.tls_domain.clone() config.censorship.tls_domain.clone()
@ -954,7 +954,6 @@ pub fn encrypt_tg_nonce_with_ciphers(nonce: &[u8; HANDSHAKE_LEN]) -> (Vec<u8>, A
} }
/// Encrypt nonce for sending to Telegram (legacy function for compatibility) /// Encrypt nonce for sending to Telegram (legacy function for compatibility)
pub fn encrypt_tg_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> { pub fn encrypt_tg_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
let (encrypted, _, _) = encrypt_tg_nonce_with_ciphers(nonce); let (encrypted, _, _) = encrypt_tg_nonce_with_ciphers(nonce);
encrypted encrypted

View File

@ -98,6 +98,7 @@ async fn maybe_write_shape_padding<W>(
cap: usize, cap: usize,
above_cap_blur: bool, above_cap_blur: bool,
above_cap_blur_max_bytes: usize, above_cap_blur_max_bytes: usize,
aggressive_mode: bool,
) where ) where
W: AsyncWrite + Unpin, W: AsyncWrite + Unpin,
{ {
@ -107,7 +108,11 @@ async fn maybe_write_shape_padding<W>(
let target_total = if total_sent >= cap && above_cap_blur && above_cap_blur_max_bytes > 0 { let target_total = if total_sent >= cap && above_cap_blur && above_cap_blur_max_bytes > 0 {
let mut rng = rand::rng(); let mut rng = rand::rng();
let extra = rng.random_range(0..=above_cap_blur_max_bytes); let extra = if aggressive_mode {
rng.random_range(1..=above_cap_blur_max_bytes)
} else {
rng.random_range(0..=above_cap_blur_max_bytes)
};
total_sent.saturating_add(extra) total_sent.saturating_add(extra)
} else { } else {
next_mask_shape_bucket(total_sent, floor, cap) next_mask_shape_bucket(total_sent, floor, cap)
@ -316,11 +321,11 @@ pub async fn handle_bad_client<R, W>(
peer, peer,
local_addr, local_addr,
); );
if let Some(header) = proxy_header { if let Some(header) = proxy_header
if !write_proxy_header_with_timeout(&mut mask_write, &header).await { && !write_proxy_header_with_timeout(&mut mask_write, &header).await
wait_mask_outcome_budget(outcome_started, config).await; {
return; wait_mask_outcome_budget(outcome_started, config).await;
} return;
} }
if timeout( if timeout(
MASK_RELAY_TIMEOUT, MASK_RELAY_TIMEOUT,
@ -335,6 +340,7 @@ pub async fn handle_bad_client<R, W>(
config.censorship.mask_shape_bucket_cap_bytes, config.censorship.mask_shape_bucket_cap_bytes,
config.censorship.mask_shape_above_cap_blur, config.censorship.mask_shape_above_cap_blur,
config.censorship.mask_shape_above_cap_blur_max_bytes, config.censorship.mask_shape_above_cap_blur_max_bytes,
config.censorship.mask_shape_hardening_aggressive_mode,
), ),
) )
.await .await
@ -387,11 +393,11 @@ pub async fn handle_bad_client<R, W>(
build_mask_proxy_header(config.censorship.mask_proxy_protocol, peer, local_addr); build_mask_proxy_header(config.censorship.mask_proxy_protocol, peer, local_addr);
let (mask_read, mut mask_write) = stream.into_split(); let (mask_read, mut mask_write) = stream.into_split();
if let Some(header) = proxy_header { if let Some(header) = proxy_header
if !write_proxy_header_with_timeout(&mut mask_write, &header).await { && !write_proxy_header_with_timeout(&mut mask_write, &header).await
wait_mask_outcome_budget(outcome_started, config).await; {
return; wait_mask_outcome_budget(outcome_started, config).await;
} return;
} }
if timeout( if timeout(
MASK_RELAY_TIMEOUT, MASK_RELAY_TIMEOUT,
@ -406,6 +412,7 @@ pub async fn handle_bad_client<R, W>(
config.censorship.mask_shape_bucket_cap_bytes, config.censorship.mask_shape_bucket_cap_bytes,
config.censorship.mask_shape_above_cap_blur, config.censorship.mask_shape_above_cap_blur,
config.censorship.mask_shape_above_cap_blur_max_bytes, config.censorship.mask_shape_above_cap_blur_max_bytes,
config.censorship.mask_shape_hardening_aggressive_mode,
), ),
) )
.await .await
@ -441,6 +448,7 @@ async fn relay_to_mask<R, W, MR, MW>(
shape_bucket_cap_bytes: usize, shape_bucket_cap_bytes: usize,
shape_above_cap_blur: bool, shape_above_cap_blur: bool,
shape_above_cap_blur_max_bytes: usize, shape_above_cap_blur_max_bytes: usize,
shape_hardening_aggressive_mode: bool,
) where ) where
R: AsyncRead + Unpin + Send + 'static, R: AsyncRead + Unpin + Send + 'static,
W: AsyncWrite + Unpin + Send + 'static, W: AsyncWrite + Unpin + Send + 'static,
@ -455,31 +463,32 @@ async fn relay_to_mask<R, W, MR, MW>(
return; return;
} }
let _ = tokio::join!( let (upstream_copy, downstream_copy) = tokio::join!(
async { async { copy_with_idle_timeout(&mut reader, &mut mask_write).await },
let copied = copy_with_idle_timeout(&mut reader, &mut mask_write).await; async { copy_with_idle_timeout(&mut mask_read, &mut writer).await }
let total_sent = initial_data.len().saturating_add(copied.total);
let should_shape =
shape_hardening_enabled && copied.ended_by_eof && !initial_data.is_empty();
maybe_write_shape_padding(
&mut mask_write,
total_sent,
should_shape,
shape_bucket_floor_bytes,
shape_bucket_cap_bytes,
shape_above_cap_blur,
shape_above_cap_blur_max_bytes,
)
.await;
let _ = mask_write.shutdown().await;
},
async {
let _ = copy_with_idle_timeout(&mut mask_read, &mut writer).await;
let _ = writer.shutdown().await;
}
); );
let total_sent = initial_data.len().saturating_add(upstream_copy.total);
let should_shape = shape_hardening_enabled
&& !initial_data.is_empty()
&& (upstream_copy.ended_by_eof
|| (shape_hardening_aggressive_mode && downstream_copy.total == 0));
maybe_write_shape_padding(
&mut mask_write,
total_sent,
should_shape,
shape_bucket_floor_bytes,
shape_bucket_cap_bytes,
shape_above_cap_blur,
shape_above_cap_blur_max_bytes,
shape_hardening_aggressive_mode,
)
.await;
let _ = mask_write.shutdown().await;
let _ = writer.shutdown().await;
} }
/// Just consume all data from client without responding /// Just consume all data from client without responding
@ -528,6 +537,14 @@ mod masking_shape_guard_adversarial_tests;
#[path = "tests/masking_shape_classifier_resistance_adversarial_tests.rs"] #[path = "tests/masking_shape_classifier_resistance_adversarial_tests.rs"]
mod masking_shape_classifier_resistance_adversarial_tests; mod masking_shape_classifier_resistance_adversarial_tests;
#[cfg(test)]
#[path = "tests/masking_shape_bypass_blackhat_tests.rs"]
mod masking_shape_bypass_blackhat_tests;
#[cfg(test)]
#[path = "tests/masking_aggressive_mode_security_tests.rs"]
mod masking_aggressive_mode_security_tests;
#[cfg(test)] #[cfg(test)]
#[path = "tests/masking_timing_sidechannel_redteam_expected_fail_tests.rs"] #[path = "tests/masking_timing_sidechannel_redteam_expected_fail_tests.rs"]
mod masking_timing_sidechannel_redteam_expected_fail_tests; mod masking_timing_sidechannel_redteam_expected_fail_tests;

View File

@ -1,7 +1,6 @@
use std::collections::hash_map::RandomState; use std::collections::hash_map::RandomState;
use std::collections::{BTreeSet, HashMap}; use std::collections::{BTreeSet, HashMap};
use std::hash::BuildHasher; use std::hash::{BuildHasher, Hash};
use std::hash::{Hash, Hasher};
use std::net::{IpAddr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::{Arc, Mutex, OnceLock}; use std::sync::{Arc, Mutex, OnceLock};
@ -50,11 +49,16 @@ const ME_D2C_FLUSH_BATCH_MAX_BYTES_MIN: usize = 4096;
const QUOTA_USER_LOCKS_MAX: usize = 64; const QUOTA_USER_LOCKS_MAX: usize = 64;
#[cfg(not(test))] #[cfg(not(test))]
const QUOTA_USER_LOCKS_MAX: usize = 4_096; const QUOTA_USER_LOCKS_MAX: usize = 4_096;
#[cfg(test)]
const QUOTA_OVERFLOW_LOCK_STRIPES: usize = 16;
#[cfg(not(test))]
const QUOTA_OVERFLOW_LOCK_STRIPES: usize = 256;
static DESYNC_DEDUP: OnceLock<DashMap<u64, Instant>> = OnceLock::new(); static DESYNC_DEDUP: OnceLock<DashMap<u64, Instant>> = OnceLock::new();
static DESYNC_HASHER: OnceLock<RandomState> = OnceLock::new(); static DESYNC_HASHER: OnceLock<RandomState> = OnceLock::new();
static DESYNC_FULL_CACHE_LAST_EMIT_AT: OnceLock<Mutex<Option<Instant>>> = OnceLock::new(); static DESYNC_FULL_CACHE_LAST_EMIT_AT: OnceLock<Mutex<Option<Instant>>> = OnceLock::new();
static DESYNC_DEDUP_EVER_SATURATED: OnceLock<AtomicBool> = OnceLock::new(); static DESYNC_DEDUP_EVER_SATURATED: OnceLock<AtomicBool> = OnceLock::new();
static QUOTA_USER_LOCKS: OnceLock<DashMap<String, Arc<AsyncMutex<()>>>> = OnceLock::new(); static QUOTA_USER_LOCKS: OnceLock<DashMap<String, Arc<AsyncMutex<()>>>> = OnceLock::new();
static QUOTA_USER_OVERFLOW_LOCKS: OnceLock<Vec<Arc<AsyncMutex<()>>>> = OnceLock::new();
static RELAY_IDLE_CANDIDATE_REGISTRY: OnceLock<Mutex<RelayIdleCandidateRegistry>> = OnceLock::new(); static RELAY_IDLE_CANDIDATE_REGISTRY: OnceLock<Mutex<RelayIdleCandidateRegistry>> = OnceLock::new();
static RELAY_IDLE_MARK_SEQ: AtomicU64 = AtomicU64::new(0); static RELAY_IDLE_MARK_SEQ: AtomicU64 = AtomicU64::new(0);
@ -286,9 +290,7 @@ impl MeD2cFlushPolicy {
fn hash_value<T: Hash>(value: &T) -> u64 { fn hash_value<T: Hash>(value: &T) -> u64 {
let state = DESYNC_HASHER.get_or_init(RandomState::new); let state = DESYNC_HASHER.get_or_init(RandomState::new);
let mut hasher = state.build_hasher(); state.hash_one(value)
value.hash(&mut hasher);
hasher.finish()
} }
fn hash_ip(ip: IpAddr) -> u64 { fn hash_ip(ip: IpAddr) -> u64 {
@ -416,6 +418,13 @@ fn desync_dedup_test_lock() -> &'static Mutex<()> {
TEST_LOCK.get_or_init(|| Mutex::new(())) TEST_LOCK.get_or_init(|| Mutex::new(()))
} }
fn desync_forensics_len_bytes(len: usize) -> ([u8; 4], bool) {
match u32::try_from(len) {
Ok(value) => (value.to_le_bytes(), false),
Err(_) => (u32::MAX.to_le_bytes(), true),
}
}
fn report_desync_frame_too_large( fn report_desync_frame_too_large(
state: &RelayForensicsState, state: &RelayForensicsState,
proto_tag: ProtoTag, proto_tag: ProtoTag,
@ -425,7 +434,8 @@ fn report_desync_frame_too_large(
raw_len_bytes: Option<[u8; 4]>, raw_len_bytes: Option<[u8; 4]>,
stats: &Stats, stats: &Stats,
) -> ProxyError { ) -> ProxyError {
let len_buf = raw_len_bytes.unwrap_or((len as u32).to_le_bytes()); let (fallback_len_buf, len_buf_truncated) = desync_forensics_len_bytes(len);
let len_buf = raw_len_bytes.unwrap_or(fallback_len_buf);
let looks_like_tls = raw_len_bytes let looks_like_tls = raw_len_bytes
.map(|b| b[0] == 0x16 && b[1] == 0x03) .map(|b| b[0] == 0x16 && b[1] == 0x03)
.unwrap_or(false); .unwrap_or(false);
@ -461,6 +471,7 @@ fn report_desync_frame_too_large(
bytes_me2c, bytes_me2c,
raw_len = len, raw_len = len,
raw_len_hex = format_args!("0x{:08x}", len), raw_len_hex = format_args!("0x{:08x}", len),
raw_len_bytes_truncated = len_buf_truncated,
raw_bytes = format_args!( raw_bytes = format_args!(
"{:02x} {:02x} {:02x} {:02x}", "{:02x} {:02x} {:02x} {:02x}",
len_buf[0], len_buf[1], len_buf[2], len_buf[3] len_buf[0], len_buf[1], len_buf[2], len_buf[3]
@ -527,6 +538,30 @@ fn quota_would_be_exceeded_for_user(
}) })
} }
#[cfg(test)]
fn quota_user_lock_test_guard() -> &'static Mutex<()> {
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
TEST_LOCK.get_or_init(|| Mutex::new(()))
}
#[cfg(test)]
fn quota_user_lock_test_scope() -> std::sync::MutexGuard<'static, ()> {
quota_user_lock_test_guard()
.lock()
.unwrap_or_else(|poisoned| poisoned.into_inner())
}
fn quota_overflow_user_lock(user: &str) -> Arc<AsyncMutex<()>> {
let stripes = QUOTA_USER_OVERFLOW_LOCKS.get_or_init(|| {
(0..QUOTA_OVERFLOW_LOCK_STRIPES)
.map(|_| Arc::new(AsyncMutex::new(())))
.collect()
});
let hash = crc32fast::hash(user.as_bytes()) as usize;
Arc::clone(&stripes[hash % stripes.len()])
}
fn quota_user_lock(user: &str) -> Arc<AsyncMutex<()>> { fn quota_user_lock(user: &str) -> Arc<AsyncMutex<()>> {
let locks = QUOTA_USER_LOCKS.get_or_init(DashMap::new); let locks = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
if let Some(existing) = locks.get(user) { if let Some(existing) = locks.get(user) {
@ -538,7 +573,7 @@ fn quota_user_lock(user: &str) -> Arc<AsyncMutex<()>> {
} }
if locks.len() >= QUOTA_USER_LOCKS_MAX { if locks.len() >= QUOTA_USER_LOCKS_MAX {
return Arc::new(AsyncMutex::new(())); return quota_overflow_user_lock(user);
} }
let created = Arc::new(AsyncMutex::new(())); let created = Arc::new(AsyncMutex::new(()));
@ -686,7 +721,6 @@ where
.max(C2ME_CHANNEL_CAPACITY_FALLBACK); .max(C2ME_CHANNEL_CAPACITY_FALLBACK);
let (c2me_tx, mut c2me_rx) = mpsc::channel::<C2MeCommand>(c2me_channel_capacity); let (c2me_tx, mut c2me_rx) = mpsc::channel::<C2MeCommand>(c2me_channel_capacity);
let me_pool_c2me = me_pool.clone(); let me_pool_c2me = me_pool.clone();
let effective_tag = effective_tag;
let c2me_sender = tokio::spawn(async move { let c2me_sender = tokio::spawn(async move {
let mut sent_since_yield = 0usize; let mut sent_since_yield = 0usize;
while let Some(cmd) = c2me_rx.recv().await { while let Some(cmd) = c2me_rx.recv().await {
@ -1522,6 +1556,31 @@ where
} }
} }
fn compute_intermediate_secure_wire_len(
data_len: usize,
padding_len: usize,
quickack: bool,
) -> Result<(u32, usize)> {
let wire_len = data_len
.checked_add(padding_len)
.ok_or_else(|| ProxyError::Proxy("Frame length overflow".into()))?;
if wire_len > 0x7fff_ffffusize {
return Err(ProxyError::Proxy(format!(
"Intermediate/Secure frame too large: {wire_len}"
)));
}
let total = 4usize
.checked_add(wire_len)
.ok_or_else(|| ProxyError::Proxy("Frame buffer size overflow".into()))?;
let mut len_val = u32::try_from(wire_len)
.map_err(|_| ProxyError::Proxy("Frame length conversion overflow".into()))?;
if quickack {
len_val |= 0x8000_0000;
}
Ok((len_val, total))
}
async fn write_client_payload<W>( async fn write_client_payload<W>(
client_writer: &mut CryptoWriter<W>, client_writer: &mut CryptoWriter<W>,
proto_tag: ProtoTag, proto_tag: ProtoTag,
@ -1591,11 +1650,8 @@ where
} else { } else {
0 0
}; };
let mut len_val = (data.len() + padding_len) as u32; let (len_val, total) =
if quickack { compute_intermediate_secure_wire_len(data.len(), padding_len, quickack)?;
len_val |= 0x8000_0000;
}
let total = 4 + data.len() + padding_len;
frame_buf.clear(); frame_buf.clear();
frame_buf.reserve(total); frame_buf.reserve(total);
frame_buf.extend_from_slice(&len_val.to_le_bytes()); frame_buf.extend_from_slice(&len_val.to_le_bytes());
@ -1645,3 +1701,23 @@ mod idle_policy_security_tests;
#[cfg(test)] #[cfg(test)]
#[path = "tests/middle_relay_desync_all_full_dedup_security_tests.rs"] #[path = "tests/middle_relay_desync_all_full_dedup_security_tests.rs"]
mod desync_all_full_dedup_security_tests; mod desync_all_full_dedup_security_tests;
#[cfg(test)]
#[path = "tests/middle_relay_stub_completion_security_tests.rs"]
mod stub_completion_security_tests;
#[cfg(test)]
#[path = "tests/middle_relay_coverage_high_risk_security_tests.rs"]
mod coverage_high_risk_security_tests;
#[cfg(test)]
#[path = "tests/middle_relay_quota_overflow_lock_security_tests.rs"]
mod quota_overflow_lock_security_tests;
#[cfg(test)]
#[path = "tests/middle_relay_length_cast_hardening_security_tests.rs"]
mod length_cast_hardening_security_tests;
#[cfg(test)]
#[path = "tests/middle_relay_blackhat_campaign_integration_tests.rs"]
mod blackhat_campaign_integration_tests;

View File

@ -1,5 +1,63 @@
//! Proxy Defs //! Proxy Defs
// Apply strict linting to proxy production code while keeping test builds noise-tolerant.
#![cfg_attr(test, allow(warnings))]
#![cfg_attr(not(test), forbid(clippy::undocumented_unsafe_blocks))]
#![cfg_attr(
not(test),
deny(
clippy::unwrap_used,
clippy::expect_used,
clippy::panic,
clippy::todo,
clippy::unimplemented,
clippy::correctness,
clippy::option_if_let_else,
clippy::or_fun_call,
clippy::branches_sharing_code,
clippy::single_option_map,
clippy::useless_let_if_seq,
clippy::redundant_locals,
clippy::cloned_ref_to_slice_refs,
unsafe_code,
clippy::await_holding_lock,
clippy::await_holding_refcell_ref,
clippy::debug_assert_with_mut_call,
clippy::macro_use_imports,
clippy::cast_ptr_alignment,
clippy::cast_lossless,
clippy::ptr_as_ptr,
clippy::large_stack_arrays,
clippy::same_functions_in_if_condition,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
rust_2018_idioms
)
)]
#![cfg_attr(
not(test),
allow(
clippy::use_self,
clippy::redundant_closure,
clippy::too_many_arguments,
clippy::doc_markdown,
clippy::missing_const_for_fn,
clippy::unnecessary_operation,
clippy::redundant_pub_crate,
clippy::derive_partial_eq_without_eq,
clippy::type_complexity,
clippy::new_ret_no_self,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::significant_drop_tightening,
clippy::significant_drop_in_scrutinee,
clippy::float_cmp,
clippy::nursery
)
)]
pub mod adaptive_buffers; pub mod adaptive_buffers;
pub mod client; pub mod client;
pub mod direct_relay; pub mod direct_relay;

View File

@ -1,3 +1,5 @@
#![allow(dead_code)]
/// Session eviction is intentionally disabled in runtime. /// Session eviction is intentionally disabled in runtime.
/// ///
/// The initial `user+dc` single-lease model caused valid parallel client /// The initial `user+dc` single-lease model caused valid parallel client

View File

@ -64,7 +64,7 @@ async fn user_connection_reservation_drop_enqueues_cleanup_synchronously() {
drop(reservation); drop(reservation);
// The IP is now inside the cleanup_queue, check that the queue has length 1 // The IP is now inside the cleanup_queue, check that the queue has length 1
let queue_len = ip_tracker.cleanup_queue.lock().unwrap().len(); let queue_len = ip_tracker.cleanup_queue_len_for_tests();
assert_eq!( assert_eq!(
queue_len, 1, queue_len, 1,
"Reservation drop must push directly to synchronized IP queue" "Reservation drop must push directly to synchronized IP queue"

View File

@ -0,0 +1,37 @@
use super::*;
#[test]
fn wrap_tls_application_record_empty_payload_emits_zero_length_record() {
let record = wrap_tls_application_record(&[]);
assert_eq!(record.len(), 5);
assert_eq!(record[0], TLS_RECORD_APPLICATION);
assert_eq!(&record[1..3], &TLS_VERSION);
assert_eq!(&record[3..5], &0u16.to_be_bytes());
}
#[test]
fn wrap_tls_application_record_oversized_payload_is_chunked_without_truncation() {
let total = (u16::MAX as usize) + 37;
let payload = vec![0xA5u8; total];
let record = wrap_tls_application_record(&payload);
let mut offset = 0usize;
let mut recovered = Vec::with_capacity(total);
let mut frames = 0usize;
while offset + 5 <= record.len() {
assert_eq!(record[offset], TLS_RECORD_APPLICATION);
assert_eq!(&record[offset + 1..offset + 3], &TLS_VERSION);
let len = u16::from_be_bytes([record[offset + 3], record[offset + 4]]) as usize;
let body_start = offset + 5;
let body_end = body_start + len;
assert!(body_end <= record.len(), "declared TLS record length must be in-bounds");
recovered.extend_from_slice(&record[body_start..body_end]);
offset = body_end;
frames += 1;
}
assert_eq!(offset, record.len(), "record parser must consume exact output size");
assert_eq!(frames, 2, "oversized payload should split into exactly two records");
assert_eq!(recovered, payload, "chunked records must preserve full payload");
}

View File

@ -757,6 +757,284 @@ fn adversarial_parent_swap_after_check_is_blocked_by_anchored_open() {
); );
} }
#[cfg(unix)]
#[test]
fn anchored_open_nix_path_writes_expected_lines() {
let base = std::env::current_dir()
.expect("cwd must be available")
.join("target")
.join(format!(
"telemt-unknown-dc-anchored-open-ok-{}",
std::process::id()
));
fs::create_dir_all(&base).expect("anchored-open-ok base must be creatable");
let rel_candidate = format!(
"target/telemt-unknown-dc-anchored-open-ok-{}/unknown-dc.log",
std::process::id()
);
let sanitized =
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
let _ = fs::remove_file(&sanitized.resolved_path);
let mut first = open_unknown_dc_log_append_anchored(&sanitized)
.expect("anchored open must create log file in allowed parent");
append_unknown_dc_line(&mut first, 31_200).expect("first append must succeed");
let mut second = open_unknown_dc_log_append_anchored(&sanitized)
.expect("anchored reopen must succeed for existing regular file");
append_unknown_dc_line(&mut second, 31_201).expect("second append must succeed");
let content =
fs::read_to_string(&sanitized.resolved_path).expect("anchored log file must be readable");
let lines: Vec<&str> = content.lines().filter(|line| !line.trim().is_empty()).collect();
assert_eq!(lines.len(), 2, "expected one line per anchored append call");
assert!(
lines.contains(&"dc_idx=31200") && lines.contains(&"dc_idx=31201"),
"anchored append output must contain both expected dc_idx lines"
);
}
#[cfg(unix)]
#[test]
fn anchored_open_parallel_appends_preserve_line_integrity() {
let base = std::env::current_dir()
.expect("cwd must be available")
.join("target")
.join(format!(
"telemt-unknown-dc-anchored-open-parallel-{}",
std::process::id()
));
fs::create_dir_all(&base).expect("anchored-open-parallel base must be creatable");
let rel_candidate = format!(
"target/telemt-unknown-dc-anchored-open-parallel-{}/unknown-dc.log",
std::process::id()
);
let sanitized =
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
let _ = fs::remove_file(&sanitized.resolved_path);
let mut workers = Vec::new();
for idx in 0..64i16 {
let sanitized = sanitized.clone();
workers.push(std::thread::spawn(move || {
let mut file = open_unknown_dc_log_append_anchored(&sanitized)
.expect("anchored open must succeed in worker");
append_unknown_dc_line(&mut file, 32_000 + idx).expect("worker append must succeed");
}));
}
for worker in workers {
worker.join().expect("worker must not panic");
}
let content =
fs::read_to_string(&sanitized.resolved_path).expect("parallel log file must be readable");
let lines: Vec<&str> = content.lines().filter(|line| !line.trim().is_empty()).collect();
assert_eq!(lines.len(), 64, "expected one complete line per worker append");
for line in lines {
assert!(
line.starts_with("dc_idx="),
"line must keep dc_idx prefix and not be interleaved: {line}"
);
let value = line
.strip_prefix("dc_idx=")
.expect("prefix checked above")
.parse::<i16>();
assert!(
value.is_ok(),
"line payload must remain parseable i16 and not be corrupted: {line}"
);
}
}
// Verifies that the anchored opener creates the unknown-dc log file with
// owner-only read/write permissions (mode 0600).
#[cfg(unix)]
#[test]
fn anchored_open_creates_private_0600_file_permissions() {
    use std::os::unix::fs::PermissionsExt;

    // Pid-namespaced scratch directory under target/ to avoid cross-run clashes.
    let pid = std::process::id();
    let scratch = std::env::current_dir()
        .expect("cwd must be available")
        .join("target")
        .join(format!("telemt-unknown-dc-anchored-perms-{}", pid));
    fs::create_dir_all(&scratch).expect("anchored-perms base must be creatable");

    let candidate = format!(
        "target/telemt-unknown-dc-anchored-perms-{}/unknown-dc.log",
        pid
    );
    let sanitized = sanitize_unknown_dc_log_path(&candidate).expect("candidate must sanitize");
    // Remove any leftover file so this test observes the creation mode.
    let _ = fs::remove_file(&sanitized.resolved_path);

    let mut log = open_unknown_dc_log_append_anchored(&sanitized)
        .expect("anchored open must create file with restricted mode");
    append_unknown_dc_line(&mut log, 31_210).expect("initial append must succeed");
    drop(log);

    // Inspect only the permission bits of the freshly created file.
    let mode_bits = fs::metadata(&sanitized.resolved_path)
        .expect("created log file metadata must be readable")
        .permissions()
        .mode()
        & 0o777;
    assert_eq!(
        mode_bits, 0o600,
        "anchored open must create unknown-dc log file with owner-only rw permissions"
    );
}
// Security test: if the sanitized log filename already exists as a symlink
// (here pointing outside the anchored directory), the anchored opener must
// refuse to follow it and fail instead of writing through the link.
#[cfg(unix)]
#[test]
fn anchored_open_rejects_existing_symlink_target() {
use std::os::unix::fs::symlink;
let base = std::env::current_dir()
.expect("cwd must be available")
.join("target")
.join(format!(
"telemt-unknown-dc-anchored-symlink-target-{}",
std::process::id()
));
fs::create_dir_all(&base).expect("anchored-symlink-target base must be creatable");
let rel_candidate = format!(
"target/telemt-unknown-dc-anchored-symlink-target-{}/unknown-dc.log",
std::process::id()
);
let sanitized =
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
// Out-of-tree file the symlink points at; it must never be written through.
let outside = std::env::temp_dir().join(format!(
"telemt-unknown-dc-anchored-symlink-outside-{}.log",
std::process::id()
));
fs::write(&outside, "outside\n").expect("outside baseline file must be writable");
let _ = fs::remove_file(&sanitized.resolved_path);
symlink(&outside, &sanitized.resolved_path)
.expect("target symlink for anchored-open rejection test must be creatable");
let err = open_unknown_dc_log_append_anchored(&sanitized)
.expect_err("anchored open must reject symlinked filename target");
// The rejection is expected to surface as ELOOP — the errno a no-follow open
// reports when the final path component is a symlink.
assert_eq!(
err.raw_os_error(),
Some(libc::ELOOP),
"anchored open should fail closed with ELOOP on symlinked target"
);
}
// Stress test: 24 threads x 40 rounds, each round re-opening the anchored log
// and appending one line. Every logical write must survive as exactly one
// complete, parseable, unique line (no loss, duplication, or interleaving).
#[cfg(unix)]
#[test]
fn anchored_open_high_contention_multi_write_preserves_complete_lines() {
let base = std::env::current_dir()
.expect("cwd must be available")
.join("target")
.join(format!(
"telemt-unknown-dc-anchored-contention-{}",
std::process::id()
));
fs::create_dir_all(&base).expect("anchored-contention base must be creatable");
let rel_candidate = format!(
"target/telemt-unknown-dc-anchored-contention-{}/unknown-dc.log",
std::process::id()
);
let sanitized =
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
let _ = fs::remove_file(&sanitized.resolved_path);
let workers = 24usize;
let rounds = 40usize;
let mut threads = Vec::new();
for worker in 0..workers {
let sanitized = sanitized.clone();
threads.push(std::thread::spawn(move || {
for round in 0..rounds {
// Fresh open each round maximizes open/append races between threads.
let mut file = open_unknown_dc_log_append_anchored(&sanitized)
.expect("anchored open must succeed under contention");
// worker*rounds+round is unique per (worker, round): dc_idx values land
// in 20_000..20_960, well inside i16, so wrapping_add never wraps here.
let dc_idx = 20_000i16.wrapping_add((worker * rounds + round) as i16);
append_unknown_dc_line(&mut file, dc_idx)
.expect("each contention append must complete");
}
}));
}
for thread in threads {
thread.join().expect("contention worker must not panic");
}
let content = fs::read_to_string(&sanitized.resolved_path)
.expect("contention output file must be readable");
let lines: Vec<&str> = content.lines().filter(|line| !line.trim().is_empty()).collect();
assert_eq!(
lines.len(),
workers * rounds,
"every contention append must produce exactly one line"
);
// Payload uniqueness proves no write was lost or duplicated.
let mut unique = std::collections::HashSet::new();
for line in lines {
assert!(
line.starts_with("dc_idx="),
"line must preserve expected prefix under heavy contention: {line}"
);
let value = line
.strip_prefix("dc_idx=")
.expect("prefix validated")
.parse::<i16>()
.expect("line payload must remain parseable i16 under contention");
unique.insert(value);
}
assert_eq!(
unique.len(),
workers * rounds,
"contention output must not lose or duplicate logical writes"
);
}
// Fail-closed test: appending through a descriptor opened read-only must
// return an error and leave the previously persisted content untouched.
#[cfg(unix)]
#[test]
fn append_unknown_dc_line_returns_error_for_read_only_descriptor() {
let base = std::env::current_dir()
.expect("cwd must be available")
.join("target")
.join(format!(
"telemt-unknown-dc-append-ro-{}",
std::process::id()
));
fs::create_dir_all(&base).expect("append-ro base must be creatable");
let rel_candidate = format!(
"target/telemt-unknown-dc-append-ro-{}/unknown-dc.log",
std::process::id()
);
let sanitized =
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
// Seed exactly one line so the post-failure content check is unambiguous.
fs::write(&sanitized.resolved_path, "seed\n").expect("seed file must be writable");
let mut readonly = std::fs::OpenOptions::new()
.read(true)
.open(&sanitized.resolved_path)
.expect("readonly file open must succeed");
append_unknown_dc_line(&mut readonly, 31_222)
.expect_err("append on readonly descriptor must fail closed");
let content_after =
fs::read_to_string(&sanitized.resolved_path).expect("seed file must remain readable");
assert_eq!(
nonempty_line_count(&content_after),
1,
"failed readonly append must not modify persisted unknown-dc log content"
);
}
#[tokio::test] #[tokio::test]
async fn unknown_dc_absolute_log_path_writes_one_entry() { async fn unknown_dc_absolute_log_path_writes_one_entry() {
let _guard = unknown_dc_test_lock() let _guard = unknown_dc_test_lock()

View File

@ -57,6 +57,25 @@ fn spread_u128(values: &[u128]) -> u128 {
max_v - min_v max_v - min_v
} }
/// Distance between the ranges spanned by two index sets.
///
/// Returns `min(b) - max(a)` when `a` lies entirely below `b` (and the
/// symmetric value in the opposite case). Returns 0 when either set is
/// empty or when the spanned intervals touch or overlap.
fn interval_gap_usize(a: &BTreeSet<usize>, b: &BTreeSet<usize>) -> usize {
    // BTreeSet keeps elements ordered, so first()/last() are the min/max.
    // Using let-else on the Option pair removes the empty-set unwrap paths
    // the manual iter().next()/next_back() version carried.
    let (Some(&a_min), Some(&a_max)) = (a.first(), a.last()) else {
        return 0;
    };
    let (Some(&b_min), Some(&b_max)) = (b.first(), b.last()) else {
        return 0;
    };
    if a_max < b_min {
        // `a` sits strictly below `b`.
        b_min - a_max
    } else if b_max < a_min {
        // `b` sits strictly below `a`.
        a_min - b_max
    } else {
        // Intervals touch or overlap: no measurable gap.
        0
    }
}
async fn collect_timing_samples(path: PathClass, timing_norm_enabled: bool, n: usize) -> Vec<u128> { async fn collect_timing_samples(path: PathClass, timing_norm_enabled: bool, n: usize) -> Vec<u128> {
let mut out = Vec::with_capacity(n); let mut out = Vec::with_capacity(n);
for _ in 0..n { for _ in 0..n {
@ -266,11 +285,15 @@ async fn integration_ab_harness_envelope_and_blur_improve_obfuscation_vs_baselin
let baseline_overlap = baseline_a.intersection(&baseline_b).count(); let baseline_overlap = baseline_a.intersection(&baseline_b).count();
let hardened_overlap = hardened_a.intersection(&hardened_b).count(); let hardened_overlap = hardened_a.intersection(&hardened_b).count();
let baseline_gap = interval_gap_usize(&baseline_a, &baseline_b);
let hardened_gap = interval_gap_usize(&hardened_a, &hardened_b);
println!( println!(
"ab_harness_length baseline_overlap={} hardened_overlap={} baseline_a={} baseline_b={} hardened_a={} hardened_b={}", "ab_harness_length baseline_overlap={} hardened_overlap={} baseline_gap={} hardened_gap={} baseline_a={} baseline_b={} hardened_a={} hardened_b={}",
baseline_overlap, baseline_overlap,
hardened_overlap, hardened_overlap,
baseline_gap,
hardened_gap,
baseline_a.len(), baseline_a.len(),
baseline_b.len(), baseline_b.len(),
hardened_a.len(), hardened_a.len(),
@ -282,10 +305,20 @@ async fn integration_ab_harness_envelope_and_blur_improve_obfuscation_vs_baselin
"baseline above-cap classes should be disjoint" "baseline above-cap classes should be disjoint"
); );
assert!( assert!(
hardened_overlap > baseline_overlap, hardened_a.len() > baseline_a.len() && hardened_b.len() > baseline_b.len(),
"above-cap blur should increase cross-class overlap: baseline={} hardened={}", "above-cap blur should widen per-class emitted lengths: baseline_a={} baseline_b={} hardened_a={} hardened_b={}",
baseline_a.len(),
baseline_b.len(),
hardened_a.len(),
hardened_b.len()
);
assert!(
hardened_overlap > baseline_overlap || hardened_gap < baseline_gap,
"above-cap blur should reduce class separability via direct overlap or tighter interval gap: baseline_overlap={} hardened_overlap={} baseline_gap={} hardened_gap={}",
baseline_overlap, baseline_overlap,
hardened_overlap hardened_overlap,
baseline_gap,
hardened_gap
); );
} }
@ -451,6 +484,8 @@ async fn timing_classifier_normalized_spread_is_not_worse_than_baseline_for_conn
#[tokio::test] #[tokio::test]
async fn timing_classifier_light_fuzz_pairwise_bucketed_accuracy_stays_bounded_under_normalization() async fn timing_classifier_light_fuzz_pairwise_bucketed_accuracy_stays_bounded_under_normalization()
{ {
const SAMPLE_COUNT: usize = 6;
let pairs = [ let pairs = [
(PathClass::ConnectFail, PathClass::ConnectSuccess), (PathClass::ConnectFail, PathClass::ConnectSuccess),
(PathClass::ConnectFail, PathClass::SlowBackend), (PathClass::ConnectFail, PathClass::SlowBackend),
@ -461,12 +496,14 @@ async fn timing_classifier_light_fuzz_pairwise_bucketed_accuracy_stays_bounded_u
let mut baseline_sum = 0.0f64; let mut baseline_sum = 0.0f64;
let mut hardened_sum = 0.0f64; let mut hardened_sum = 0.0f64;
let mut pair_count = 0usize; let mut pair_count = 0usize;
let acc_quant_step = 1.0 / (2 * SAMPLE_COUNT) as f64;
let tolerated_pair_regression = acc_quant_step + 0.03;
for (a, b) in pairs { for (a, b) in pairs {
let baseline_a = collect_timing_samples(a, false, 6).await; let baseline_a = collect_timing_samples(a, false, SAMPLE_COUNT).await;
let baseline_b = collect_timing_samples(b, false, 6).await; let baseline_b = collect_timing_samples(b, false, SAMPLE_COUNT).await;
let hardened_a = collect_timing_samples(a, true, 6).await; let hardened_a = collect_timing_samples(a, true, SAMPLE_COUNT).await;
let hardened_b = collect_timing_samples(b, true, 6).await; let hardened_b = collect_timing_samples(b, true, SAMPLE_COUNT).await;
let baseline_acc = best_threshold_accuracy_u128( let baseline_acc = best_threshold_accuracy_u128(
&bucketize_ms(&baseline_a, 20), &bucketize_ms(&baseline_a, 20),
@ -482,11 +519,15 @@ async fn timing_classifier_light_fuzz_pairwise_bucketed_accuracy_stays_bounded_u
// Guard hard only on informative baseline pairs. // Guard hard only on informative baseline pairs.
if baseline_acc >= 0.75 { if baseline_acc >= 0.75 {
assert!( assert!(
hardened_acc <= baseline_acc + 0.05, hardened_acc <= baseline_acc + tolerated_pair_regression,
"normalization should not materially worsen informative pair: baseline={baseline_acc:.3} hardened={hardened_acc:.3}" "normalization should not materially worsen informative pair: baseline={baseline_acc:.3} hardened={hardened_acc:.3} tolerated={tolerated_pair_regression:.3}"
); );
} }
println!(
"timing_classifier_pair baseline={baseline_acc:.3} hardened={hardened_acc:.3} tolerated_pair_regression={tolerated_pair_regression:.3}"
);
if hardened_acc + 0.05 <= baseline_acc { if hardened_acc + 0.05 <= baseline_acc {
meaningful_improvement_seen = true; meaningful_improvement_seen = true;
} }
@ -500,7 +541,7 @@ async fn timing_classifier_light_fuzz_pairwise_bucketed_accuracy_stays_bounded_u
let hardened_avg = hardened_sum / pair_count as f64; let hardened_avg = hardened_sum / pair_count as f64;
assert!( assert!(
hardened_avg <= baseline_avg + 0.08, hardened_avg <= baseline_avg + 0.10,
"normalization should not materially increase average pairwise separability: baseline_avg={baseline_avg:.3} hardened_avg={hardened_avg:.3}" "normalization should not materially increase average pairwise separability: baseline_avg={baseline_avg:.3} hardened_avg={hardened_avg:.3}"
); );

View File

@ -0,0 +1,107 @@
use super::*;
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
use tokio::net::TcpListener;
use tokio::time::Duration;
// Test harness: routes a synthetic TLS-handshake-looking probe through
// `handle_bad_client` toward a local TCP backend and returns how many bytes
// the backend actually received. Knobs select aggressive shaping mode and
// above-cap blur so callers can compare forwarded lengths across configs.
//
// * `body_sent` — probe body size; the probe is a 5-byte header
//   (0x16 0x03 0x01 + big-endian length 7000) followed by `body_sent` filler.
// * `close_client_after_write` — true: client half shuts down (EOF path);
//   false: client writes a keepalive, idles ~170ms, then drops (non-EOF path).
async fn capture_forwarded_len_with_mode(
body_sent: usize,
close_client_after_write: bool,
aggressive_mode: bool,
above_cap_blur: bool,
above_cap_blur_max_bytes: usize,
) -> usize {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let backend_addr = listener.local_addr().unwrap();
let mut config = ProxyConfig::default();
config.general.beobachten = false;
// Point the masking backend at the freshly bound local listener.
config.censorship.mask = true;
config.censorship.mask_host = Some("127.0.0.1".to_string());
config.censorship.mask_port = backend_addr.port();
config.censorship.mask_shape_hardening = true;
config.censorship.mask_shape_hardening_aggressive_mode = aggressive_mode;
config.censorship.mask_shape_bucket_floor_bytes = 512;
config.censorship.mask_shape_bucket_cap_bytes = 4096;
config.censorship.mask_shape_above_cap_blur = above_cap_blur;
config.censorship.mask_shape_above_cap_blur_max_bytes = above_cap_blur_max_bytes;
// Backend leg: count every forwarded byte until EOF or a 2s read timeout.
let accept_task = tokio::spawn(async move {
let (mut stream, _) = listener.accept().await.unwrap();
let mut got = Vec::new();
let _ = tokio::time::timeout(Duration::from_secs(2), stream.read_to_end(&mut got)).await;
got.len()
});
let (server_reader, mut client_writer) = duplex(64 * 1024);
let (_client_visible_reader, client_visible_writer) = duplex(64 * 1024);
let peer: SocketAddr = "198.51.100.248:57248".parse().unwrap();
let local: SocketAddr = "127.0.0.1:443".parse().unwrap();
let beobachten = BeobachtenStore::new();
// Probe bytes: handshake-style header advertising 7000, body filled with 0x31.
let mut probe = vec![0u8; 5 + body_sent];
probe[0] = 0x16;
probe[1] = 0x03;
probe[2] = 0x01;
probe[3..5].copy_from_slice(&7000u16.to_be_bytes());
probe[5..].fill(0x31);
let fallback = tokio::spawn(async move {
handle_bad_client(
server_reader,
client_visible_writer,
&probe,
peer,
local,
&config,
&beobachten,
)
.await;
});
if close_client_after_write {
// EOF path: client half-closes immediately after the probe.
client_writer.shutdown().await.unwrap();
} else {
// Non-EOF path: keep the client briefly alive, then drop without shutdown.
client_writer.write_all(b"keepalive").await.unwrap();
tokio::time::sleep(Duration::from_millis(170)).await;
drop(client_writer);
}
let _ = tokio::time::timeout(Duration::from_secs(4), fallback)
.await
.unwrap()
.unwrap();
tokio::time::timeout(Duration::from_secs(4), accept_task)
.await
.unwrap()
.unwrap()
}
// Drives the backend-silent, non-EOF (timeout) path twice — toggling only the
// aggressive shaping mode — and compares the forwarded byte counts.
#[tokio::test]
async fn aggressive_mode_shapes_backend_silent_non_eof_path() {
    let payload_len = 17usize;
    let floor = 512usize;

    // Legacy behaviour: the timeout path passes through unshaped.
    let legacy = capture_forwarded_len_with_mode(payload_len, false, false, false, 0).await;
    // Aggressive behaviour: the same path must be padded up to the floor.
    let aggressive = capture_forwarded_len_with_mode(payload_len, false, true, false, 0).await;

    assert!(legacy < floor, "legacy mode should keep timeout path unshaped");
    assert!(
        aggressive >= floor,
        "aggressive mode must shape backend-silent non-EOF paths (aggressive={aggressive}, floor={floor})"
    );
}
// With blur enabled and a 1-byte blur bound, aggressive mode must never emit
// a frame of exactly the unpadded base length (header + body) on the EOF path.
#[tokio::test]
async fn aggressive_mode_enforces_positive_above_cap_blur() {
    let payload_len = 5000usize;
    let base = 5 + payload_len;
    let mut attempt = 0usize;
    while attempt < 48 {
        let observed = capture_forwarded_len_with_mode(payload_len, true, true, true, 1).await;
        assert!(
            observed > base,
            "aggressive mode must not emit exact base length when blur is enabled (observed={observed}, base={base})"
        );
        attempt += 1;
    }
}

View File

@ -1375,6 +1375,7 @@ async fn relay_to_mask_keeps_backend_to_client_flow_when_client_to_backend_stall
0, 0,
false, false,
0, 0,
false,
) )
.await; .await;
}); });
@ -1494,7 +1495,17 @@ async fn relay_to_mask_timeout_cancels_and_drops_all_io_endpoints() {
let timed = timeout( let timed = timeout(
Duration::from_millis(40), Duration::from_millis(40),
relay_to_mask( relay_to_mask(
reader, writer, mask_read, mask_write, b"", false, 0, 0, false, 0, reader,
writer,
mask_read,
mask_write,
b"",
false,
0,
0,
false,
0,
false,
), ),
) )
.await; .await;

View File

@ -0,0 +1,182 @@
use super::*;
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
use tokio::net::TcpListener;
use tokio::time::Duration;
// Test harness: routes a synthetic TLS-handshake-looking probe through
// `handle_bad_client` to a local TCP backend and returns the byte count the
// backend received. Knobs toggle shape hardening and above-cap blur, and
// select between a client-EOF path and a keepalive-then-drop (non-EOF) path.
async fn capture_forwarded_len_with_optional_eof(
body_sent: usize,
shape_hardening: bool,
above_cap_blur: bool,
above_cap_blur_max_bytes: usize,
close_client_after_write: bool,
) -> usize {
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
let backend_addr = listener.local_addr().unwrap();
let mut config = ProxyConfig::default();
config.general.beobachten = false;
// Point the masking backend at the freshly bound local listener.
config.censorship.mask = true;
config.censorship.mask_host = Some("127.0.0.1".to_string());
config.censorship.mask_port = backend_addr.port();
config.censorship.mask_shape_hardening = shape_hardening;
config.censorship.mask_shape_bucket_floor_bytes = 512;
config.censorship.mask_shape_bucket_cap_bytes = 4096;
config.censorship.mask_shape_above_cap_blur = above_cap_blur;
config.censorship.mask_shape_above_cap_blur_max_bytes = above_cap_blur_max_bytes;
// Backend leg: count every forwarded byte until EOF or a 2s read timeout.
let accept_task = tokio::spawn(async move {
let (mut stream, _) = listener.accept().await.unwrap();
let mut got = Vec::new();
let _ = tokio::time::timeout(Duration::from_secs(2), stream.read_to_end(&mut got)).await;
got.len()
});
let (server_reader, mut client_writer) = duplex(64 * 1024);
let (_client_visible_reader, client_visible_writer) = duplex(64 * 1024);
let peer: SocketAddr = "198.51.100.241:57241".parse().unwrap();
let local: SocketAddr = "127.0.0.1:443".parse().unwrap();
let beobachten = BeobachtenStore::new();
// Probe bytes: handshake-style header advertising 7000, body filled with 0x73.
let mut probe = vec![0u8; 5 + body_sent];
probe[0] = 0x16;
probe[1] = 0x03;
probe[2] = 0x01;
probe[3..5].copy_from_slice(&7000u16.to_be_bytes());
probe[5..].fill(0x73);
let fallback = tokio::spawn(async move {
handle_bad_client(
server_reader,
client_visible_writer,
&probe,
peer,
local,
&config,
&beobachten,
)
.await;
});
if close_client_after_write {
// EOF path: client half-closes immediately after the probe.
client_writer.shutdown().await.unwrap();
} else {
// Non-EOF path: keep the client briefly alive, then drop without shutdown.
client_writer.write_all(b"keepalive").await.unwrap();
tokio::time::sleep(Duration::from_millis(170)).await;
drop(client_writer);
}
let _ = tokio::time::timeout(Duration::from_secs(4), fallback)
.await
.unwrap()
.unwrap();
tokio::time::timeout(Duration::from_secs(4), accept_task)
.await
.unwrap()
.unwrap()
}
// Red-team detector (ignored by default): encodes the strict expectation that
// shape padding reaches the floor even when the client never sends EOF while
// the backend stays silent. Kept ignored — see the attribute for the rationale.
#[tokio::test]
#[ignore = "red-team detector: shaping on non-EOF timeout path is disabled by design to prevent post-timeout tail leaks"]
async fn security_shape_padding_applies_without_client_eof_when_backend_silent() {
let body_sent = 17usize;
let hardened_floor = 512usize;
// Same probe, EOF path vs non-EOF path, hardening on, blur off.
let with_eof = capture_forwarded_len_with_optional_eof(body_sent, true, false, 0, true).await;
let without_eof =
capture_forwarded_len_with_optional_eof(body_sent, true, false, 0, false).await;
assert!(
with_eof >= hardened_floor,
"EOF path should be shaped to floor (with_eof={with_eof}, floor={hardened_floor})"
);
assert!(
without_eof >= hardened_floor,
"non-EOF path should also be shaped when backend is silent (without_eof={without_eof}, floor={hardened_floor})"
);
}
// Red-team detector (ignored by default): strict model where every blurred
// sample must strictly exceed the exact base frame length when blur is on.
#[tokio::test]
#[ignore = "red-team detector: blur currently allows zero-extra sample by design within [0..=max] bound"]
async fn security_above_cap_blur_never_emits_exact_base_length() {
    let payload = 5000usize;
    let base = 5 + payload;
    let blur_bound = 1usize;
    let mut round = 0usize;
    while round < 64 {
        let observed =
            capture_forwarded_len_with_optional_eof(payload, true, true, blur_bound, true).await;
        assert!(
            observed > base,
            "above-cap blur must add at least one byte when enabled (observed={observed}, base={base})"
        );
        round += 1;
    }
}
// Red-team detector (ignored by default): a strict anti-probing model would
// require padding that does not hinge on observing client EOF, since an
// idle-timeout probe could otherwise bypass shaping. Kept ignored by design.
#[tokio::test]
#[ignore = "red-team detector: shape padding currently depends on EOF, enabling idle-timeout bypass probes"]
async fn redteam_detector_shape_padding_must_not_depend_on_client_eof() {
let body_sent = 17usize;
let hardened_floor = 512usize;
// EOF path first (sanity), then the non-EOF path under the strict model.
let with_eof = capture_forwarded_len_with_optional_eof(body_sent, true, false, 0, true).await;
let without_eof =
capture_forwarded_len_with_optional_eof(body_sent, true, false, 0, false).await;
assert!(
with_eof >= hardened_floor,
"sanity check failed: EOF path should be shaped to floor (with_eof={with_eof}, floor={hardened_floor})"
);
assert!(
without_eof >= hardened_floor,
"strict anti-probing model expects shaping even without EOF; observed without_eof={without_eof}, floor={hardened_floor}"
);
}
// Red-team detector (ignored by default): repeatedly samples the blurred path
// and fails if any sample lands on the exact base length, which would leak
// the size class to a passive observer.
#[tokio::test]
#[ignore = "red-team detector: zero-extra above-cap blur samples leak exact class boundary"]
async fn redteam_detector_above_cap_blur_must_never_emit_exact_base_length() {
    let payload = 5000usize;
    let base = 5 + payload;
    let blur_bound = 1usize;
    let mut saw_exact_base = false;
    let mut round = 0usize;
    // Stop at the first exact-base sample, mirroring an attacker's detector.
    while !saw_exact_base && round < 96 {
        let observed =
            capture_forwarded_len_with_optional_eof(payload, true, true, blur_bound, true).await;
        saw_exact_base = observed == base;
        round += 1;
    }
    assert!(
        !saw_exact_base,
        "strict anti-classifier model expects >0 blur always; observed exact base length leaks class"
    );
}
// Red-team detector (ignored by default): samples blurred output lengths for
// two well-separated size classes (5000 vs 7000 body bytes, blur bound 64)
// and checks whether the observed [min, max] bands overlap. Disjoint bands
// would let a passive classifier separate the classes near-perfectly.
#[tokio::test]
#[ignore = "red-team detector: disjoint above-cap ranges enable near-perfect size-class classification"]
async fn redteam_detector_above_cap_blur_ranges_for_far_classes_should_overlap() {
let mut a_min = usize::MAX;
let mut a_max = 0usize;
let mut b_min = usize::MAX;
let mut b_max = 0usize;
for _ in 0..48 {
let a = capture_forwarded_len_with_optional_eof(5000, true, true, 64, true).await;
let b = capture_forwarded_len_with_optional_eof(7000, true, true, 64, true).await;
a_min = a_min.min(a);
a_max = a_max.max(a);
b_min = b_min.min(b);
b_max = b_max.max(b);
}
// Standard interval-intersection test on the two observed bands.
let overlap = a_min <= b_max && b_min <= a_max;
assert!(
overlap,
"strict anti-classifier model expects overlapping output bands; class_a=[{a_min},{a_max}] class_b=[{b_min},{b_max}]"
);
}

View File

@ -42,6 +42,7 @@ async fn run_relay_case(
cap, cap,
above_cap_blur, above_cap_blur,
above_cap_blur_max_bytes, above_cap_blur_max_bytes,
false,
) )
.await; .await;
}); });

View File

@ -56,14 +56,14 @@ fn shape_bucket_never_drops_below_total_for_valid_ranges() {
#[tokio::test] #[tokio::test]
async fn maybe_write_shape_padding_writes_exact_delta() { async fn maybe_write_shape_padding_writes_exact_delta() {
let mut writer = CountingWriter::new(); let mut writer = CountingWriter::new();
maybe_write_shape_padding(&mut writer, 1200, true, 1000, 1500, false, 0).await; maybe_write_shape_padding(&mut writer, 1200, true, 1000, 1500, false, 0, false).await;
assert_eq!(writer.written, 300); assert_eq!(writer.written, 300);
} }
#[tokio::test] #[tokio::test]
async fn maybe_write_shape_padding_skips_when_disabled() { async fn maybe_write_shape_padding_skips_when_disabled() {
let mut writer = CountingWriter::new(); let mut writer = CountingWriter::new();
maybe_write_shape_padding(&mut writer, 1200, false, 1000, 1500, false, 0).await; maybe_write_shape_padding(&mut writer, 1200, false, 1000, 1500, false, 0, false).await;
assert_eq!(writer.written, 0); assert_eq!(writer.written, 0);
} }
@ -87,6 +87,7 @@ async fn relay_to_mask_applies_cap_clamped_padding_for_non_power_of_two_cap() {
1500, 1500,
false, false,
0, 0,
false,
) )
.await; .await;
}); });

View File

@ -0,0 +1,112 @@
use super::*;
use crate::stats::Stats;
use dashmap::DashMap;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use tokio::sync::Barrier;
use tokio::time::{Duration, timeout};
// Adversarial campaign test combining three legs at once:
//   1. the bounded per-user lock cache is saturated with retained guards,
//   2. 16 workers hammer a full command queue to force backpressure errors,
//   3. 16 workers race the same user's quota check-then-charge sequence.
// Fail-closed expectation: at most one quota contender wins, the user's
// counter never overshoots, and queue pressure is both observed and counted.
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn blackhat_campaign_saturation_quota_race_with_queue_pressure_stays_fail_closed() {
let _guard = super::quota_user_lock_test_scope();
let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
map.clear();
// Leg 1: hold QUOTA_USER_LOCKS_MAX distinct locks so the cache is at capacity.
let mut retained = Vec::with_capacity(QUOTA_USER_LOCKS_MAX);
for idx in 0..QUOTA_USER_LOCKS_MAX {
retained.push(quota_user_lock(&format!(
"middle-blackhat-held-{}-{idx}",
std::process::id()
)));
}
assert_eq!(
map.len(),
QUOTA_USER_LOCKS_MAX,
"precondition: bounded lock cache must be saturated"
);
// Leg 2: prefill a capacity-1 channel so every further enqueue sees pressure.
let (tx, _rx) = mpsc::channel::<C2MeCommand>(1);
tx.send(C2MeCommand::Close)
.await
.expect("queue prefill should succeed");
let pressure_seq_before = relay_pressure_event_seq();
let pressure_errors = Arc::new(AtomicUsize::new(0));
let mut pressure_workers = Vec::new();
for _ in 0..16 {
let tx = tx.clone();
let pressure_errors = Arc::clone(&pressure_errors);
pressure_workers.push(tokio::spawn(async move {
if enqueue_c2me_command(&tx, C2MeCommand::Close).await.is_err() {
pressure_errors.fetch_add(1, Ordering::Relaxed);
}
}));
}
// Leg 3: 16 contenders race the same user's quota under a shared barrier.
let stats = Arc::new(Stats::new());
let user = format!("middle-blackhat-quota-race-{}", std::process::id());
let gate = Arc::new(Barrier::new(16));
let mut quota_workers = Vec::new();
for _ in 0..16u8 {
let stats = Arc::clone(&stats);
let user = user.clone();
let gate = Arc::clone(&gate);
quota_workers.push(tokio::spawn(async move {
gate.wait().await;
// Per-user lock serializes the check-then-charge critical section.
let user_lock = quota_user_lock(&user);
let _quota_guard = user_lock.lock().await;
if quota_would_be_exceeded_for_user(&stats, &user, Some(1), 1) {
return false;
}
stats.add_user_octets_to(&user, 1);
true
}));
}
let mut ok_count = 0usize;
let mut denied_count = 0usize;
for worker in quota_workers {
let result = timeout(Duration::from_secs(2), worker)
.await
.expect("quota worker must finish")
.expect("quota worker must not panic");
if result {
ok_count += 1;
} else {
denied_count += 1;
}
}
for worker in pressure_workers {
timeout(Duration::from_secs(2), worker)
.await
.expect("pressure worker must finish")
.expect("pressure worker must not panic");
}
assert_eq!(
stats.get_user_total_octets(&user),
1,
"black-hat campaign must not overshoot same-user quota under saturation"
);
assert!(ok_count <= 1, "at most one quota contender may succeed");
assert!(
denied_count >= 15,
"all remaining contenders must be quota-denied"
);
// Pressure accounting must have advanced and at least one enqueue must fail.
let pressure_seq_after = relay_pressure_event_seq();
assert!(
pressure_seq_after > pressure_seq_before,
"queue pressure leg must trigger pressure accounting"
);
assert!(
pressure_errors.load(Ordering::Relaxed) >= 1,
"at least one pressure worker should fail from persistent backpressure"
);
drop(retained);
}

View File

@ -0,0 +1,708 @@
use super::*;
use crate::crypto::AesCtr;
use crate::crypto::SecureRandom;
use crate::stats::Stats;
use crate::stream::{BufferPool, PooledBuffer};
use std::sync::Arc;
use tokio::io::AsyncReadExt;
use tokio::io::duplex;
use tokio::sync::mpsc;
use tokio::time::{Duration as TokioDuration, timeout};
/// Builds a `PooledBuffer` holding an exact copy of `data`, backed by a
/// throwaway 4-slot pool sized to the payload (minimum buffer size 1).
fn make_pooled_payload(data: &[u8]) -> PooledBuffer {
    let buf_size = data.len().max(1);
    let pool = Arc::new(BufferPool::with_config(buf_size, 4));
    let mut out = pool.get();
    // Size the buffer to the payload, then overwrite it byte-for-byte.
    out.resize(data.len(), 0);
    out[..data.len()].copy_from_slice(data);
    out
}
// Verifies abridged short-form framing: an 8-byte payload serializes as one
// header byte — length in 4-byte words OR'd with 0x80 (quickack, per the
// assertion below) — followed by the payload unchanged.
#[tokio::test]
async fn write_client_payload_abridged_short_quickack_sets_flag_and_preserves_payload() {
let (mut read_side, write_side) = duplex(4096);
// All-zero key/IV give a reproducible CTR keystream for decryption below.
let key = [0u8; 32];
let iv = 0u128;
let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
let mut decryptor = AesCtr::new(&key, iv);
let rng = SecureRandom::new();
let mut frame_buf = Vec::new();
let payload = vec![0xA1, 0xB2, 0xC3, 0xD4, 0x10, 0x20, 0x30, 0x40];
write_client_payload(
&mut writer,
ProtoTag::Abridged,
RPC_FLAG_QUICKACK,
&payload,
&rng,
&mut frame_buf,
)
.await
.expect("abridged quickack payload should serialize");
writer.flush().await.expect("flush must succeed");
let mut encrypted = vec![0u8; 1 + payload.len()];
read_side
.read_exact(&mut encrypted)
.await
.expect("must read serialized abridged frame");
let plaintext = decryptor.decrypt(&encrypted);
// Header byte: 0x80 quickack bit combined with length expressed in words.
assert_eq!(plaintext[0], 0x80 | ((payload.len() / 4) as u8));
assert_eq!(&plaintext[1..], payload.as_slice());
}
// Verifies abridged extended framing exactly at the switch-over boundary
// (0x7f four-byte words): the first byte becomes the 0x7f escape — 0xff once
// the quickack bit is OR'd in — followed by a 3-byte little-endian word
// length, then the payload.
#[tokio::test]
async fn write_client_payload_abridged_extended_header_is_encoded_correctly() {
let (mut read_side, write_side) = duplex(16 * 1024);
// All-zero key/IV give a reproducible CTR keystream for decryption below.
let key = [0u8; 32];
let iv = 0u128;
let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
let mut decryptor = AesCtr::new(&key, iv);
let rng = SecureRandom::new();
let mut frame_buf = Vec::new();
// Boundary where abridged switches to extended length encoding.
let payload = vec![0x5Au8; 0x7f * 4];
write_client_payload(
&mut writer,
ProtoTag::Abridged,
RPC_FLAG_QUICKACK,
&payload,
&rng,
&mut frame_buf,
)
.await
.expect("extended abridged payload should serialize");
writer.flush().await.expect("flush must succeed");
let mut encrypted = vec![0u8; 4 + payload.len()];
read_side
.read_exact(&mut encrypted)
.await
.expect("must read serialized extended abridged frame");
let plaintext = decryptor.decrypt(&encrypted);
assert_eq!(plaintext[0], 0xff, "0x7f with quickack bit must be set");
// Word count 0x7f encoded little-endian across three bytes.
assert_eq!(&plaintext[1..4], &[0x7f, 0x00, 0x00]);
assert_eq!(&plaintext[4..], payload.as_slice());
}
// Fail-closed check: abridged framing measures length in 4-byte words, so a
// 3-byte payload must be rejected with an error that names the alignment
// contract rather than being silently padded or truncated.
#[tokio::test]
async fn write_client_payload_abridged_misaligned_is_rejected_fail_closed() {
let (_read_side, write_side) = duplex(1024);
let key = [0u8; 32];
let iv = 0u128;
let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
let rng = SecureRandom::new();
let mut frame_buf = Vec::new();
let err = write_client_payload(
&mut writer,
ProtoTag::Abridged,
0,
&[1, 2, 3],
&rng,
&mut frame_buf,
)
.await
.expect_err("misaligned abridged payload must be rejected");
let msg = format!("{err}");
assert!(
msg.contains("4-byte aligned"),
"error should explain alignment contract, got: {msg}"
);
}
// Fail-closed check: secure-mode framing also requires 4-byte-aligned
// payloads; a 5-byte payload must be rejected with an explicit, triageable
// error message.
#[tokio::test]
async fn write_client_payload_secure_misaligned_is_rejected_fail_closed() {
let (_read_side, write_side) = duplex(1024);
let key = [0u8; 32];
let iv = 0u128;
let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
let rng = SecureRandom::new();
let mut frame_buf = Vec::new();
let err = write_client_payload(
&mut writer,
ProtoTag::Secure,
0,
&[9, 8, 7, 6, 5],
&rng,
&mut frame_buf,
)
.await
.expect_err("misaligned secure payload must be rejected");
let msg = format!("{err}");
assert!(
msg.contains("Secure payload must be 4-byte aligned"),
"error should be explicit for fail-closed triage, got: {msg}"
);
}
// Verifies intermediate framing: a 4-byte little-endian length prefix whose
// most significant bit (0x8000_0000) carries the quickack flag, followed by
// the raw payload.
#[tokio::test]
async fn write_client_payload_intermediate_quickack_sets_length_msb() {
let (mut read_side, write_side) = duplex(4096);
// All-zero key/IV give a reproducible CTR keystream for decryption below.
let key = [0u8; 32];
let iv = 0u128;
let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
let mut decryptor = AesCtr::new(&key, iv);
let rng = SecureRandom::new();
let mut frame_buf = Vec::new();
let payload = b"hello-middle-relay";
write_client_payload(
&mut writer,
ProtoTag::Intermediate,
RPC_FLAG_QUICKACK,
payload,
&rng,
&mut frame_buf,
)
.await
.expect("intermediate quickack payload should serialize");
writer.flush().await.expect("flush must succeed");
let mut encrypted = vec![0u8; 4 + payload.len()];
read_side
.read_exact(&mut encrypted)
.await
.expect("must read intermediate frame");
let plaintext = decryptor.decrypt(&encrypted);
let mut len_bytes = [0u8; 4];
len_bytes.copy_from_slice(&plaintext[..4]);
let len_with_flags = u32::from_le_bytes(len_bytes);
assert_ne!(len_with_flags & 0x8000_0000, 0, "quickack bit must be set");
// Lower 31 bits carry the true payload length.
assert_eq!((len_with_flags & 0x7fff_ffff) as usize, payload.len());
assert_eq!(&plaintext[4..], payload);
}
// Verifies secure-mode framing: quickack surfaces as the MSB of the 4-byte
// little-endian length prefix, the wire length covers the payload plus 1..=3
// bytes of randomized tail padding, and the payload bytes survive intact.
#[tokio::test]
async fn write_client_payload_secure_quickack_prefix_and_padding_bounds_hold() {
let (mut read_side, write_side) = duplex(4096);
// All-zero key/IV give a reproducible CTR keystream for decryption below.
let key = [0u8; 32];
let iv = 0u128;
let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
let mut decryptor = AesCtr::new(&key, iv);
let rng = SecureRandom::new();
let mut frame_buf = Vec::new();
let payload = vec![0x33u8; 100]; // 4-byte aligned as required by secure mode.
write_client_payload(
&mut writer,
ProtoTag::Secure,
RPC_FLAG_QUICKACK,
&payload,
&rng,
&mut frame_buf,
)
.await
.expect("secure quickack payload should serialize");
writer.flush().await.expect("flush must succeed");
// Secure mode adds 1..=3 bytes of randomized tail padding.
let mut encrypted_header = [0u8; 4];
read_side
.read_exact(&mut encrypted_header)
.await
.expect("must read secure header");
let decrypted_header = decryptor.decrypt(&encrypted_header);
let header: [u8; 4] = decrypted_header
.try_into()
.expect("decrypted secure header must be 4 bytes");
let wire_len_raw = u32::from_le_bytes(header);
assert_ne!(
wire_len_raw & 0x8000_0000,
0,
"secure quickack bit must be set"
);
// Mask off the flag bit to recover the advertised body length.
let wire_len = (wire_len_raw & 0x7fff_ffff) as usize;
assert!(wire_len >= payload.len());
let padding_len = wire_len - payload.len();
assert!(
(1..=3).contains(&padding_len),
"secure writer must add bounded random tail padding, got {padding_len}"
);
let mut encrypted_body = vec![0u8; wire_len];
read_side
.read_exact(&mut encrypted_body)
.await
.expect("must read secure body");
let decrypted_body = decryptor.decrypt(&encrypted_body);
// Payload prefix must be intact; the trailing padding bytes are random.
assert_eq!(&decrypted_body[..payload.len()], payload.as_slice());
}
// Fail-closed check (heavy, ignored by default): a payload one 4-byte word
// beyond the 24-bit abridged word-length range must be rejected with an
// explicit oversize error instead of a truncated length field.
#[tokio::test]
#[ignore = "heavy: allocates >64MiB to validate abridged too-large fail-closed branch"]
async fn write_client_payload_abridged_too_large_is_rejected_fail_closed() {
let (_read_side, write_side) = duplex(1024);
let key = [0u8; 32];
let iv = 0u128;
let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
let rng = SecureRandom::new();
let mut frame_buf = Vec::new();
// Exactly one 4-byte word above the encodable 24-bit abridged length range.
let payload = vec![0x00u8; (1 << 24) * 4];
let err = write_client_payload(
&mut writer,
ProtoTag::Abridged,
0,
&payload,
&rng,
&mut frame_buf,
)
.await
.expect_err("oversized abridged payload must be rejected");
let msg = format!("{err}");
assert!(
msg.contains("Abridged frame too large"),
"error must clearly indicate oversize fail-close path, got: {msg}"
);
}
#[tokio::test]
async fn write_client_ack_intermediate_is_little_endian() {
    // Intermediate-mode acks are expected on the wire as 4 little-endian bytes.
    // A second AesCtr with the same key/iv replays the keystream to decrypt
    // what the writer produced.
    let (mut read_side, write_side) = duplex(1024);
    let key = [0u8; 32];
    let iv = 0u128;
    let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
    let mut decryptor = AesCtr::new(&key, iv);
    write_client_ack(&mut writer, ProtoTag::Intermediate, 0x11_22_33_44)
        .await
        .expect("ack serialization should succeed");
    writer.flush().await.expect("flush must succeed");
    let mut encrypted = [0u8; 4];
    read_side
        .read_exact(&mut encrypted)
        .await
        .expect("must read ack bytes");
    let plain = decryptor.decrypt(&encrypted);
    assert_eq!(plain.as_slice(), &0x11_22_33_44u32.to_le_bytes());
}
#[tokio::test]
async fn write_client_ack_abridged_is_big_endian() {
    // Counterpart to the intermediate test above: abridged-mode acks are
    // expected on the wire in big-endian byte order.
    let (mut read_side, write_side) = duplex(1024);
    let key = [0u8; 32];
    let iv = 0u128;
    let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
    let mut decryptor = AesCtr::new(&key, iv);
    write_client_ack(&mut writer, ProtoTag::Abridged, 0xDE_AD_BE_EF)
        .await
        .expect("ack serialization should succeed");
    writer.flush().await.expect("flush must succeed");
    let mut encrypted = [0u8; 4];
    read_side
        .read_exact(&mut encrypted)
        .await
        .expect("must read ack bytes");
    let plain = decryptor.decrypt(&encrypted);
    assert_eq!(plain.as_slice(), &0xDE_AD_BE_EFu32.to_be_bytes());
}
#[tokio::test]
async fn write_client_payload_abridged_short_boundary_0x7e_is_single_byte_header() {
    // 0x7e words (0x7e * 4 bytes) is the largest payload that still uses the
    // single-byte abridged header; 0x7f is the escape for the extended form,
    // so this pins the upper boundary of the short encoding.
    let (mut read_side, write_side) = duplex(1024 * 1024);
    let key = [0u8; 32];
    let iv = 0u128;
    let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
    let mut decryptor = AesCtr::new(&key, iv);
    let rng = SecureRandom::new();
    let mut frame_buf = Vec::new();
    let payload = vec![0xABu8; 0x7e * 4];
    write_client_payload(
        &mut writer,
        ProtoTag::Abridged,
        0,
        &payload,
        &rng,
        &mut frame_buf,
    )
    .await
    .expect("boundary payload should serialize");
    writer.flush().await.expect("flush must succeed");
    // Expect exactly: 1 header byte (word count) followed by the raw payload.
    let mut encrypted = vec![0u8; 1 + payload.len()];
    read_side.read_exact(&mut encrypted).await.unwrap();
    let plain = decryptor.decrypt(&encrypted);
    assert_eq!(plain[0], 0x7e);
    assert_eq!(&plain[1..], payload.as_slice());
}
#[tokio::test]
async fn write_client_payload_abridged_extended_without_quickack_has_clean_prefix() {
    // 0x80 words crosses the short-header boundary, forcing the extended
    // abridged header: escape byte 0x7f plus a 3-byte little-endian word count.
    // With no quickack flag requested, the prefix must be exactly that.
    let (mut read_side, write_side) = duplex(16 * 1024);
    let key = [0u8; 32];
    let iv = 0u128;
    let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
    let mut decryptor = AesCtr::new(&key, iv);
    let rng = SecureRandom::new();
    let mut frame_buf = Vec::new();
    let payload = vec![0x42u8; 0x80 * 4];
    write_client_payload(
        &mut writer,
        ProtoTag::Abridged,
        0,
        &payload,
        &rng,
        &mut frame_buf,
    )
    .await
    .expect("extended payload should serialize");
    writer.flush().await.expect("flush must succeed");
    let mut encrypted = vec![0u8; 4 + payload.len()];
    read_side.read_exact(&mut encrypted).await.unwrap();
    let plain = decryptor.decrypt(&encrypted);
    assert_eq!(plain[0], 0x7f);
    // 0x80 words, little-endian over 3 bytes.
    assert_eq!(&plain[1..4], &[0x80, 0x00, 0x00]);
    assert_eq!(&plain[4..], payload.as_slice());
}
#[tokio::test]
async fn write_client_payload_intermediate_zero_length_emits_header_only() {
    // Degenerate input: an empty payload in intermediate mode should still emit
    // a well-formed 4-byte length header (all zeros) and nothing else.
    let (mut read_side, write_side) = duplex(1024);
    let key = [0u8; 32];
    let iv = 0u128;
    let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
    let mut decryptor = AesCtr::new(&key, iv);
    let rng = SecureRandom::new();
    let mut frame_buf = Vec::new();
    write_client_payload(
        &mut writer,
        ProtoTag::Intermediate,
        0,
        &[],
        &rng,
        &mut frame_buf,
    )
    .await
    .expect("zero-length intermediate payload should serialize");
    writer.flush().await.expect("flush must succeed");
    let mut encrypted = [0u8; 4];
    read_side.read_exact(&mut encrypted).await.unwrap();
    let plain = decryptor.decrypt(&encrypted);
    assert_eq!(plain.as_slice(), &[0, 0, 0, 0]);
}
#[tokio::test]
async fn write_client_payload_intermediate_ignores_unrelated_flags() {
    // Passing a flag bit other than quickack (here 0x4000_0000) must not leak
    // into the wire header: the length field should contain only the payload
    // length.
    let (mut read_side, write_side) = duplex(1024);
    let key = [0u8; 32];
    let iv = 0u128;
    let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
    let mut decryptor = AesCtr::new(&key, iv);
    let rng = SecureRandom::new();
    let mut frame_buf = Vec::new();
    let payload = [7u8; 12];
    write_client_payload(
        &mut writer,
        ProtoTag::Intermediate,
        0x4000_0000,
        &payload,
        &rng,
        &mut frame_buf,
    )
    .await
    .expect("payload should serialize");
    writer.flush().await.expect("flush must succeed");
    let mut encrypted = [0u8; 16];
    read_side.read_exact(&mut encrypted).await.unwrap();
    let plain = decryptor.decrypt(&encrypted);
    let len = u32::from_le_bytes(plain[0..4].try_into().unwrap());
    assert_eq!(len, payload.len() as u32, "only quickack bit may affect header");
    assert_eq!(&plain[4..], payload.as_slice());
}
#[tokio::test]
async fn write_client_payload_secure_without_quickack_keeps_msb_clear() {
    // In secure mode the header's top bit carries quickack; with flags == 0
    // that bit must stay clear so the peer does not misread an ack request.
    let (mut read_side, write_side) = duplex(4096);
    let key = [0u8; 32];
    let iv = 0u128;
    let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
    let mut decryptor = AesCtr::new(&key, iv);
    let rng = SecureRandom::new();
    let mut frame_buf = Vec::new();
    let payload = [0x1Du8; 64];
    write_client_payload(
        &mut writer,
        ProtoTag::Secure,
        0,
        &payload,
        &rng,
        &mut frame_buf,
    )
    .await
    .expect("payload should serialize");
    writer.flush().await.expect("flush must succeed");
    let mut encrypted_header = [0u8; 4];
    read_side.read_exact(&mut encrypted_header).await.unwrap();
    let plain_header = decryptor.decrypt(&encrypted_header);
    let h: [u8; 4] = plain_header.as_slice().try_into().unwrap();
    let wire_len_raw = u32::from_le_bytes(h);
    assert_eq!(wire_len_raw & 0x8000_0000, 0, "quickack bit must stay clear");
}
#[tokio::test]
async fn secure_padding_light_fuzz_distribution_has_multiple_outcomes() {
    // Writes 96 secure frames and records which tail-padding lengths (1..=3)
    // actually occur. A generator that always picks the same value would be a
    // padding-predictability regression, so at least two distinct outcomes are
    // required. 96 samples make a false failure astronomically unlikely for a
    // uniform-ish source.
    let (mut read_side, write_side) = duplex(256 * 1024);
    let key = [0u8; 32];
    let iv = 0u128;
    let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
    let mut decryptor = AesCtr::new(&key, iv);
    let rng = SecureRandom::new();
    let mut frame_buf = Vec::new();
    let payload = [0x55u8; 100];
    // Index = observed padding length (index 0 unused).
    let mut seen = [false; 4];
    for _ in 0..96 {
        write_client_payload(
            &mut writer,
            ProtoTag::Secure,
            0,
            &payload,
            &rng,
            &mut frame_buf,
        )
        .await
        .expect("secure payload should serialize");
        writer.flush().await.expect("flush must succeed");
        let mut encrypted_header = [0u8; 4];
        read_side.read_exact(&mut encrypted_header).await.unwrap();
        let plain_header = decryptor.decrypt(&encrypted_header);
        let h: [u8; 4] = plain_header.as_slice().try_into().unwrap();
        let wire_len = (u32::from_le_bytes(h) & 0x7fff_ffff) as usize;
        let padding_len = wire_len - payload.len();
        assert!((1..=3).contains(&padding_len));
        seen[padding_len] = true;
        // Drain (and keep the decryptor keystream aligned with) the body.
        let mut encrypted_body = vec![0u8; wire_len];
        read_side.read_exact(&mut encrypted_body).await.unwrap();
        let _ = decryptor.decrypt(&encrypted_body);
    }
    let distinct = (1..=3).filter(|idx| seen[*idx]).count();
    assert!(
        distinct >= 2,
        "padding generator should not collapse to a single outcome under campaign"
    );
}
#[tokio::test]
async fn write_client_payload_mixed_proto_sequence_preserves_stream_sync() {
    // Writes three frames with three different framings back-to-back on one
    // encrypted stream, then reads them back in order. Any framing bug would
    // desynchronize the shared keystream and corrupt every later frame, so
    // this exercises cross-frame stream integrity, not just single frames.
    let (mut read_side, write_side) = duplex(128 * 1024);
    let key = [0u8; 32];
    let iv = 0u128;
    let mut writer = CryptoWriter::new(write_side, AesCtr::new(&key, iv), 8 * 1024);
    let mut decryptor = AesCtr::new(&key, iv);
    let rng = SecureRandom::new();
    let mut frame_buf = Vec::new();
    let p1 = vec![1u8; 8];
    let p2 = vec![2u8; 16];
    let p3 = vec![3u8; 20];
    write_client_payload(&mut writer, ProtoTag::Abridged, 0, &p1, &rng, &mut frame_buf)
        .await
        .unwrap();
    write_client_payload(
        &mut writer,
        ProtoTag::Intermediate,
        RPC_FLAG_QUICKACK,
        &p2,
        &rng,
        &mut frame_buf,
    )
    .await
    .unwrap();
    write_client_payload(&mut writer, ProtoTag::Secure, 0, &p3, &rng, &mut frame_buf)
        .await
        .unwrap();
    writer.flush().await.unwrap();
    // Frame 1: abridged short.
    let mut e1 = vec![0u8; 1 + p1.len()];
    read_side.read_exact(&mut e1).await.unwrap();
    let d1 = decryptor.decrypt(&e1);
    assert_eq!(d1[0], (p1.len() / 4) as u8);
    assert_eq!(&d1[1..], p1.as_slice());
    // Frame 2: intermediate with quickack.
    let mut e2 = vec![0u8; 4 + p2.len()];
    read_side.read_exact(&mut e2).await.unwrap();
    let d2 = decryptor.decrypt(&e2);
    let l2 = u32::from_le_bytes(d2[0..4].try_into().unwrap());
    assert_ne!(l2 & 0x8000_0000, 0);
    assert_eq!((l2 & 0x7fff_ffff) as usize, p2.len());
    assert_eq!(&d2[4..], p2.as_slice());
    // Frame 3: secure with bounded tail.
    let mut e3h = [0u8; 4];
    read_side.read_exact(&mut e3h).await.unwrap();
    let d3h = decryptor.decrypt(&e3h);
    let l3 = (u32::from_le_bytes(d3h.as_slice().try_into().unwrap()) & 0x7fff_ffff) as usize;
    assert!(l3 >= p3.len());
    assert!((1..=3).contains(&(l3 - p3.len())));
    let mut e3b = vec![0u8; l3];
    read_side.read_exact(&mut e3b).await.unwrap();
    let d3b = decryptor.decrypt(&e3b);
    assert_eq!(&d3b[..p3.len()], p3.as_slice());
}
#[test]
fn should_yield_sender_boundary_matrix_blackhat() {
    // Boundary matrix around the fairness budget: yielding requires BOTH a
    // backlog and a sent count at or above C2ME_SENDER_FAIRNESS_BUDGET.
    let cases = [
        (0, false, false),
        (0, true, false),
        (C2ME_SENDER_FAIRNESS_BUDGET - 1, true, false),
        (C2ME_SENDER_FAIRNESS_BUDGET, false, false),
        (C2ME_SENDER_FAIRNESS_BUDGET, true, true),
        (C2ME_SENDER_FAIRNESS_BUDGET.saturating_add(1024), true, true),
    ];
    for (sent, backlog, expected) in cases {
        assert_eq!(should_yield_c2me_sender(sent, backlog), expected);
    }
}
#[test]
fn should_yield_sender_light_fuzz_matches_oracle() {
    // Drive 5000 pseudo-random (sent, backlog) pairs from a fixed-seed xorshift
    // generator and check the production predicate against a trivially-correct
    // inline oracle. Deterministic seed keeps the test reproducible.
    let mut state: u64 = 0xD00D_BAAD_F00D_CAFE;
    for _ in 0..5000 {
        state ^= state << 7;
        state ^= state >> 9;
        state ^= state << 8;
        let sent_bytes = (state as usize) & 0x1fff;
        let has_backlog = (state & 1) == 1;
        let oracle = has_backlog && sent_bytes >= C2ME_SENDER_FAIRNESS_BUDGET;
        assert_eq!(should_yield_c2me_sender(sent_bytes, has_backlog), oracle);
    }
}
#[test]
fn quota_would_be_exceeded_exact_remaining_one_byte() {
    // With 99 of 100 quota bytes consumed, exactly one more byte fits; two
    // must be rejected. Pins the inclusive/exclusive edge of the check.
    let stats = Stats::new();
    let user = "quota-edge";
    stats.add_user_octets_to(user, 99);
    let quota = Some(100u64);
    assert!(
        !quota_would_be_exceeded_for_user(&stats, user, quota, 1),
        "exactly remaining budget should be allowed"
    );
    assert!(
        quota_would_be_exceeded_for_user(&stats, user, quota, 2),
        "one byte beyond remaining budget must be rejected"
    );
}
#[test]
fn quota_would_be_exceeded_saturating_edge_remains_fail_closed() {
    // Usage sits 4 below u64::MAX and the quota 3 below; adding 2 more octets
    // crosses the quota right where naive arithmetic could saturate or wrap.
    let stats = Stats::new();
    let user = "quota-saturating-edge";
    stats.add_user_octets_to(user, u64::MAX - 4);
    let exceeded = quota_would_be_exceeded_for_user(&stats, user, Some(u64::MAX - 3), 2);
    assert!(exceeded, "saturating arithmetic edge must stay fail-closed");
}
#[test]
fn quota_exceeded_boundary_is_inclusive() {
    // Usage equal to the quota counts as exceeded; a quota one above the usage
    // does not. Pins the inclusive boundary semantics.
    let stats = Stats::new();
    let user = "quota-inclusive-boundary";
    stats.add_user_octets_to(user, 50);
    let at_limit = quota_exceeded_for_user(&stats, user, Some(50));
    let above_usage = quota_exceeded_for_user(&stats, user, Some(51));
    assert!(at_limit);
    assert!(!above_usage);
}
#[tokio::test]
async fn enqueue_c2me_close_fast_path_succeeds_without_backpressure() {
    // With spare channel capacity, Close should enqueue immediately (fast path,
    // no backpressure handling) and arrive intact on the receiver.
    let (tx, mut rx) = mpsc::channel::<C2MeCommand>(4);
    enqueue_c2me_command(&tx, C2MeCommand::Close)
        .await
        .expect("close should enqueue on fast path");
    let recv = timeout(TokioDuration::from_millis(50), rx.recv())
        .await
        .expect("must receive close command")
        .expect("close command should be present");
    assert!(matches!(recv, C2MeCommand::Close));
}
#[tokio::test]
async fn enqueue_c2me_data_full_then_drain_preserves_order() {
    // Capacity-1 channel is pre-filled so the producer's enqueue hits the full
    // (backpressure) path; once the receiver drains the first item, the blocked
    // enqueue must complete and both items must arrive in FIFO order.
    let (tx, mut rx) = mpsc::channel::<C2MeCommand>(1);
    tx.send(C2MeCommand::Data {
        payload: make_pooled_payload(&[1]),
        flags: 10,
    })
    .await
    .unwrap();
    let tx2 = tx.clone();
    let producer = tokio::spawn(async move {
        enqueue_c2me_command(
            &tx2,
            C2MeCommand::Data {
                payload: make_pooled_payload(&[2, 2]),
                flags: 20,
            },
        )
        .await
    });
    // Give the producer a chance to reach the full-channel path before draining.
    tokio::time::sleep(TokioDuration::from_millis(10)).await;
    let first = rx.recv().await.expect("first item should exist");
    match first {
        C2MeCommand::Data { payload, flags } => {
            assert_eq!(payload.as_ref(), &[1]);
            assert_eq!(flags, 10);
        }
        C2MeCommand::Close => panic!("unexpected close as first item"),
    }
    producer.await.unwrap().expect("producer should complete");
    let second = timeout(TokioDuration::from_millis(100), rx.recv())
        .await
        .unwrap()
        .expect("second item should exist");
    match second {
        C2MeCommand::Data { payload, flags } => {
            assert_eq!(payload.as_ref(), &[2, 2]);
            assert_eq!(flags, 20);
        }
        C2MeCommand::Close => panic!("unexpected close as second item"),
    }
}

View File

@ -0,0 +1,75 @@
use super::*;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
#[test]
fn intermediate_secure_wire_len_allows_max_31bit_payload() {
    // Largest accepted body: payload + 1 padding byte = 0x7fff_ffff, which
    // still fits the 31-bit length field; quickack contributes only the top bit.
    let result = compute_intermediate_secure_wire_len(0x7fff_fffe, 1, true);
    let (len_val, total) = result.expect("31-bit wire length should be accepted");
    assert_eq!(len_val, 0xffff_ffff, "quickack must use top bit only");
    // Total on-wire size: 4-byte header + 0x7fff_ffff body bytes.
    assert_eq!(total, 0x8000_0003);
}
#[test]
fn intermediate_secure_wire_len_rejects_length_above_31bit_limit() {
    // 0x7fff_ffff payload plus one padding byte no longer fits the 31-bit
    // length field, so the computation must fail closed.
    let err = compute_intermediate_secure_wire_len(0x7fff_ffff, 1, false)
        .expect_err("wire length above 31-bit must fail closed");
    let rendered = err.to_string();
    assert!(
        rendered.contains("frame too large"),
        "error should identify oversize frame path"
    );
}
#[test]
fn intermediate_secure_wire_len_rejects_addition_overflow() {
    // usize::MAX + padding cannot be represented at all; the checked addition
    // inside the computation must surface an explicit overflow error.
    let err = compute_intermediate_secure_wire_len(usize::MAX, 1, false)
        .expect_err("overflowing addition must fail closed");
    let rendered = err.to_string();
    assert!(
        rendered.contains("overflow"),
        "error should clearly report overflow"
    );
}
#[test]
fn desync_forensics_len_bytes_marks_truncation_for_oversize_values() {
    // Values that fit in u32 must pass through verbatim with no truncation flag.
    let (bytes, truncated) = desync_forensics_len_bytes(0x1020_3040);
    assert_eq!(bytes, 0x1020_3040u32.to_le_bytes());
    assert!(!truncated);
    // Oversize values clamp to u32::MAX and must raise the truncation flag.
    let (bytes, truncated) = desync_forensics_len_bytes(usize::MAX);
    assert_eq!(bytes, u32::MAX.to_le_bytes());
    assert!(truncated);
}
#[test]
fn report_desync_frame_too_large_preserves_full_length_in_error_message() {
    // The forensic error text must carry the original usize length verbatim
    // (here usize::MAX) rather than a u32-truncated copy, so offline analysis
    // sees the real observed value.
    let state = RelayForensicsState {
        trace_id: 0x1234,
        conn_id: 0x5678,
        user: "middle-desync-oversize".to_string(),
        peer: "198.51.100.55:443".parse().expect("valid test peer"),
        peer_hash: 0xAABBCCDD,
        started_at: Instant::now(),
        bytes_c2me: 7,
        bytes_me2c: Arc::new(AtomicU64::new(9)),
        desync_all_full: false,
    };
    let huge_len = usize::MAX;
    let err = report_desync_frame_too_large(
        &state,
        ProtoTag::Intermediate,
        3,
        1024,
        huge_len,
        None,
        &Stats::new(),
    );
    let msg = format!("{err}");
    assert!(
        msg.contains(&huge_len.to_string()),
        "error must preserve full usize length for forensics"
    );
}

View File

@ -0,0 +1,131 @@
use super::*;
use dashmap::DashMap;
use std::sync::Arc;
#[test]
fn saturation_uses_stable_overflow_lock_without_cache_growth() {
    // Serialize tests that mutate the global quota-lock map.
    let _guard = super::quota_user_lock_test_scope();
    let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
    map.clear();
    // Fill the bounded cache to its hard cap, retaining every Arc so no entry
    // can be reclaimed while we probe the overflow path.
    let prefix = format!("middle-quota-held-{}", std::process::id());
    let mut retained = Vec::with_capacity(QUOTA_USER_LOCKS_MAX);
    for idx in 0..QUOTA_USER_LOCKS_MAX {
        retained.push(quota_user_lock(&format!("{prefix}-{idx}")));
    }
    assert_eq!(map.len(), QUOTA_USER_LOCKS_MAX);
    // A user arriving while the map is saturated must not grow the map, yet
    // must still resolve to the same (striped) lock on every call.
    let user = format!("middle-quota-overflow-{}", std::process::id());
    let first = quota_user_lock(&user);
    let second = quota_user_lock(&user);
    assert!(
        Arc::ptr_eq(&first, &second),
        "overflow user must get deterministic same lock while cache is saturated"
    );
    assert_eq!(
        map.len(),
        QUOTA_USER_LOCKS_MAX,
        "overflow path must not grow bounded lock map"
    );
    assert!(
        map.get(&user).is_none(),
        "overflow user should stay outside bounded lock map under saturation"
    );
    drop(retained);
}
#[test]
fn overflow_striping_keeps_different_users_distributed() {
    // Serialize tests that mutate the global quota-lock map.
    let _guard = super::quota_user_lock_test_scope();
    let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
    map.clear();
    // Saturate the bounded cache while retaining every handle, forcing the
    // three probe users below onto the overflow (striped) path.
    let prefix = format!("middle-quota-dist-held-{}", std::process::id());
    let mut retained = Vec::with_capacity(QUOTA_USER_LOCKS_MAX);
    for idx in 0..QUOTA_USER_LOCKS_MAX {
        retained.push(quota_user_lock(&format!("{prefix}-{idx}")));
    }
    let a = quota_user_lock("middle-overflow-user-a");
    let b = quota_user_lock("middle-overflow-user-b");
    let c = quota_user_lock("middle-overflow-user-c");
    // Count distinct lock instances by pointer identity.
    let distinct = [
        Arc::as_ptr(&a) as usize,
        Arc::as_ptr(&b) as usize,
        Arc::as_ptr(&c) as usize,
    ]
    .iter()
    .copied()
    .collect::<std::collections::HashSet<_>>()
    .len();
    // Striping may legitimately collide two of three users; requiring >= 2
    // distinct locks only rules out a degenerate single shared lock.
    assert!(
        distinct >= 2,
        "striped overflow lock set should avoid collapsing all users to one lock"
    );
    drop(retained);
}
#[test]
fn reclaim_path_caches_new_user_after_stale_entries_drop() {
    // Serialize tests that mutate the global quota-lock map.
    let _guard = super::quota_user_lock_test_scope();
    let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
    map.clear();
    // Saturate the cache, then drop every handle so all entries become stale
    // and eligible for reclaim.
    let prefix = format!("middle-quota-reclaim-held-{}", std::process::id());
    let mut retained = Vec::with_capacity(QUOTA_USER_LOCKS_MAX);
    for idx in 0..QUOTA_USER_LOCKS_MAX {
        retained.push(quota_user_lock(&format!("{prefix}-{idx}")));
    }
    drop(retained);
    // With stale entries reclaimable, a new user must land back in the map.
    let user = format!("middle-quota-reclaim-user-{}", std::process::id());
    let got = quota_user_lock(&user);
    assert!(map.get(&user).is_some());
    // Caller handle + map entry ⇒ at least two strong references.
    assert!(
        Arc::strong_count(&got) >= 2,
        "after reclaim, lock should be held both by caller and map"
    );
}
#[test]
fn overflow_path_same_user_is_stable_across_parallel_threads() {
    // Serialize tests that mutate the global quota-lock map.
    let _guard = super::quota_user_lock_test_scope();
    let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
    map.clear();
    // Saturate the bounded cache so every lookup below takes the overflow path.
    let mut retained = Vec::with_capacity(QUOTA_USER_LOCKS_MAX);
    for idx in 0..QUOTA_USER_LOCKS_MAX {
        retained.push(quota_user_lock(&format!(
            "middle-quota-thread-held-{}-{idx}",
            std::process::id()
        )));
    }
    // 32 threads race to resolve the same overflow user; striping must be
    // deterministic, so all of them must receive the identical lock instance.
    let user = format!("middle-quota-overflow-thread-user-{}", std::process::id());
    let mut workers = Vec::new();
    for _ in 0..32 {
        let user = user.clone();
        workers.push(std::thread::spawn(move || quota_user_lock(&user)));
    }
    let first = workers
        .remove(0)
        .join()
        .expect("thread must return lock handle");
    for worker in workers {
        let got = worker.join().expect("thread must return lock handle");
        assert!(
            Arc::ptr_eq(&first, &got),
            "same overflow user should resolve to one striped lock even under contention"
        );
    }
    drop(retained);
}

View File

@ -15,7 +15,7 @@ use std::collections::{HashMap, HashSet};
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::{Mutex, OnceLock}; use std::sync::Mutex;
use std::thread; use std::thread;
use tokio::io::AsyncReadExt; use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
@ -38,11 +38,6 @@ fn make_pooled_payload_from(pool: &Arc<BufferPool>, data: &[u8]) -> PooledBuffer
payload payload
} }
fn quota_user_lock_test_lock() -> &'static Mutex<()> {
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
TEST_LOCK.get_or_init(|| Mutex::new(()))
}
#[test] #[test]
fn should_yield_sender_only_on_budget_with_backlog() { fn should_yield_sender_only_on_budget_with_backlog() {
assert!(!should_yield_c2me_sender(0, true)); assert!(!should_yield_c2me_sender(0, true));
@ -243,6 +238,11 @@ fn desync_dedup_cache_is_bounded() {
#[test] #[test]
fn quota_user_lock_cache_reuses_entry_for_same_user() { fn quota_user_lock_cache_reuses_entry_for_same_user() {
let _guard = super::quota_user_lock_test_scope();
let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
map.clear();
let a = quota_user_lock("quota-user-a"); let a = quota_user_lock("quota-user-a");
let b = quota_user_lock("quota-user-a"); let b = quota_user_lock("quota-user-a");
assert!(Arc::ptr_eq(&a, &b), "same user must reuse same quota lock"); assert!(Arc::ptr_eq(&a, &b), "same user must reuse same quota lock");
@ -250,9 +250,7 @@ fn quota_user_lock_cache_reuses_entry_for_same_user() {
#[test] #[test]
fn quota_user_lock_cache_is_bounded_under_unique_churn() { fn quota_user_lock_cache_is_bounded_under_unique_churn() {
let _guard = quota_user_lock_test_lock() let _guard = super::quota_user_lock_test_scope();
.lock()
.expect("quota user lock test lock must be available");
let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new); let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
map.clear(); map.clear();
@ -270,10 +268,8 @@ fn quota_user_lock_cache_is_bounded_under_unique_churn() {
} }
#[test] #[test]
fn quota_user_lock_cache_saturation_returns_ephemeral_lock_without_growth() { fn quota_user_lock_cache_saturation_returns_stable_overflow_lock_without_growth() {
let _guard = quota_user_lock_test_lock() let _guard = super::quota_user_lock_test_scope();
.lock()
.expect("quota user lock test lock must be available");
let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new); let map = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
for attempt in 0..8u32 { for attempt in 0..8u32 {
@ -305,8 +301,8 @@ fn quota_user_lock_cache_saturation_returns_ephemeral_lock_without_growth() {
"overflow path should not cache new user lock when map is saturated and all entries are retained" "overflow path should not cache new user lock when map is saturated and all entries are retained"
); );
assert!( assert!(
!Arc::ptr_eq(&overflow_a, &overflow_b), Arc::ptr_eq(&overflow_a, &overflow_b),
"overflow user lock should be ephemeral under saturation to preserve bounded cache size" "overflow user lock should use deterministic striping under saturation"
); );
drop(retained); drop(retained);
@ -953,24 +949,6 @@ fn light_fuzz_desync_dedup_temporal_gate_behavior_is_stable() {
panic!("expected at least one post-window sample to re-emit forensic record"); panic!("expected at least one post-window sample to re-emit forensic record");
} }
#[test]
#[ignore = "Tracking for M-04: Verify should_emit_full_desync returns true on first occurrence and false on duplicate within window"]
fn should_emit_full_desync_filters_duplicates() {
unimplemented!("Stub for M-04");
}
#[test]
#[ignore = "Tracking for M-04: Verify desync dedup eviction behaves correctly under map-full condition"]
fn desync_dedup_eviction_under_map_full_condition() {
unimplemented!("Stub for M-04");
}
#[tokio::test]
#[ignore = "Tracking for M-05: Verify C2ME channel full path yields then sends under backpressure"]
async fn c2me_channel_full_path_yields_then_sends() {
unimplemented!("Stub for M-05");
}
fn make_forensics_state() -> RelayForensicsState { fn make_forensics_state() -> RelayForensicsState {
RelayForensicsState { RelayForensicsState {
trace_id: 1, trace_id: 1,

View File

@ -0,0 +1,168 @@
use super::*;
use crate::stream::BufferPool;
use std::collections::HashSet;
use std::sync::Arc;
use tokio::time::{Duration as TokioDuration, timeout};
/// Builds a `PooledBuffer` containing `data`, backed by a fresh throwaway pool.
/// Slot size is `data.len().max(1)` so zero-length inputs still get a valid
/// buffer from the pool.
fn make_pooled_payload(data: &[u8]) -> PooledBuffer {
    let pool = Arc::new(BufferPool::with_config(data.len().max(1), 4));
    let mut payload = pool.get();
    // Grow the pooled buffer to size, then copy the caller's bytes in.
    payload.resize(data.len(), 0);
    payload[..data.len()].copy_from_slice(data);
    payload
}
#[test]
#[ignore = "Tracking for M-04: Verify should_emit_full_desync returns true on first occurrence and false on duplicate within window"]
fn should_emit_full_desync_filters_duplicates() {
    // Serialize against other tests that touch the global dedup map.
    let _guard = desync_dedup_test_lock()
        .lock()
        .expect("desync dedup test lock must be available");
    clear_desync_dedup_for_testing();
    let key = 0x4D04_0000_0000_0001_u64;
    let base = Instant::now();
    assert!(
        should_emit_full_desync(key, false, base),
        "first occurrence must emit full forensic record"
    );
    assert!(
        !should_emit_full_desync(key, false, base),
        "duplicate at same timestamp must be suppressed"
    );
    // Expected window semantics: suppressed strictly inside the window,
    // re-emitted (and refreshed) exactly at the window boundary.
    let within_window = base + DESYNC_DEDUP_WINDOW - TokioDuration::from_millis(1);
    assert!(
        !should_emit_full_desync(key, false, within_window),
        "duplicate strictly inside dedup window must stay suppressed"
    );
    let on_window_edge = base + DESYNC_DEDUP_WINDOW;
    assert!(
        should_emit_full_desync(key, false, on_window_edge),
        "duplicate at window boundary must re-emit and refresh"
    );
}
#[test]
#[ignore = "Tracking for M-04: Verify desync dedup eviction behaves correctly under map-full condition"]
fn desync_dedup_eviction_under_map_full_condition() {
    // Serialize against other tests that touch the global dedup map.
    let _guard = desync_dedup_test_lock()
        .lock()
        .expect("desync dedup test lock must be available");
    clear_desync_dedup_for_testing();
    let base = Instant::now();
    // Warm the cache with exactly DESYNC_DEDUP_MAX_ENTRIES unique keys.
    for key in 0..DESYNC_DEDUP_MAX_ENTRIES as u64 {
        assert!(
            should_emit_full_desync(key, false, base),
            "unique key should be inserted while warming dedup cache"
        );
    }
    let dedup = DESYNC_DEDUP
        .get()
        .expect("dedup map must exist after warm-up insertions");
    assert_eq!(
        dedup.len(),
        DESYNC_DEDUP_MAX_ENTRIES,
        "cache warm-up must reach exact hard cap"
    );
    // Snapshot keys before inserting one newcomer at map-full, then verify the
    // bounded eviction: size unchanged, newcomer present, exactly one key
    // evicted and exactly one added.
    let before_keys: HashSet<u64> = dedup.iter().map(|entry| *entry.key()).collect();
    let newcomer_key = 0x4D04_FFFF_FFFF_0001_u64;
    assert!(
        should_emit_full_desync(newcomer_key, false, base),
        "first newcomer at map-full must emit under bounded full-cache gate"
    );
    let after_keys: HashSet<u64> = dedup.iter().map(|entry| *entry.key()).collect();
    assert_eq!(
        dedup.len(),
        DESYNC_DEDUP_MAX_ENTRIES,
        "map-full insertion must preserve hard capacity bound"
    );
    assert!(
        after_keys.contains(&newcomer_key),
        "newcomer must be present after bounded eviction path"
    );
    let removed_count = before_keys.difference(&after_keys).count();
    let added_count = after_keys.difference(&before_keys).count();
    assert_eq!(
        removed_count, 1,
        "map-full insertion must evict exactly one prior key"
    );
    assert_eq!(
        added_count, 1,
        "map-full insertion must add exactly one newcomer key"
    );
    assert!(
        !should_emit_full_desync(newcomer_key, false, base),
        "immediate duplicate newcomer must remain suppressed"
    );
}
#[tokio::test]
#[ignore = "Tracking for M-05: Verify C2ME channel full path yields then sends under backpressure"]
async fn c2me_channel_full_path_yields_then_sends() {
    // Capacity-1 channel pre-filled with one frame forces the spawned producer
    // onto the backpressure path: it must stay pending while the queue is full
    // and complete once the receiver drains capacity, with FIFO order intact.
    let (tx, mut rx) = mpsc::channel::<C2MeCommand>(1);
    tx.send(C2MeCommand::Data {
        payload: make_pooled_payload(&[0xAA]),
        flags: 1,
    })
    .await
    .expect("priming queue with one frame must succeed");
    let tx2 = tx.clone();
    let producer = tokio::spawn(async move {
        enqueue_c2me_command(
            &tx2,
            C2MeCommand::Data {
                payload: make_pooled_payload(&[0xBB, 0xCC]),
                flags: 2,
            },
        )
        .await
    });
    // Let the producer task run and park on the full channel before asserting
    // that it is still pending.
    tokio::task::yield_now().await;
    tokio::time::sleep(TokioDuration::from_millis(10)).await;
    assert!(
        !producer.is_finished(),
        "producer should stay pending while queue is full"
    );
    let first = timeout(TokioDuration::from_millis(100), rx.recv())
        .await
        .expect("receiver should observe primed frame")
        .expect("first queued command must exist");
    match first {
        C2MeCommand::Data { payload, flags } => {
            assert_eq!(payload.as_ref(), &[0xAA]);
            assert_eq!(flags, 1);
        }
        C2MeCommand::Close => panic!("unexpected close command as first item"),
    }
    producer
        .await
        .expect("producer task must not panic")
        .expect("blocked enqueue must succeed once receiver drains capacity");
    let second = timeout(TokioDuration::from_millis(100), rx.recv())
        .await
        .expect("receiver should observe backpressure-resumed frame")
        .expect("second queued command must exist");
    match second {
        C2MeCommand::Data { payload, flags } => {
            assert_eq!(payload.as_ref(), &[0xBB, 0xCC]);
            assert_eq!(flags, 2);
        }
        C2MeCommand::Close => panic!("unexpected close command as second item"),
    }
}

View File

@ -652,7 +652,7 @@ mod tests {
let mut out = BytesMut::new(); let mut out = BytesMut::new();
codec.encode(&frame, &mut out).unwrap(); codec.encode(&frame, &mut out).unwrap();
assert!(out.len() >= 4 + payload.len() + 1); assert!(out.len() > 4 + payload.len());
let wire_len = u32::from_le_bytes([out[0], out[1], out[2], out[3]]) as usize; let wire_len = u32::from_le_bytes([out[0], out[1], out[2], out[3]]) as usize;
assert!( assert!(
(payload.len() + 1..=payload.len() + 3).contains(&wire_len), (payload.len() + 1..=payload.len() + 3).contains(&wire_len),

View File

@ -584,7 +584,7 @@ mod tests {
// Long frame (> 0x7f words = 508 bytes) // Long frame (> 0x7f words = 508 bytes)
let data: Vec<u8> = (0..1000).map(|i| (i % 256) as u8).collect(); let data: Vec<u8> = (0..1000).map(|i| (i % 256) as u8).collect();
let padded_len = (data.len() + 3) / 4 * 4; let padded_len = data.len().div_ceil(4) * 4;
let mut padded = data.clone(); let mut padded = data.clone();
padded.resize(padded_len, 0); padded.resize(padded_len, 0);

View File

@ -0,0 +1,56 @@
/// Legacy spelling of "round `len` up to the next multiple of 4".
///
/// Returns `None` when either the `len + 3` bump or the final word-to-byte
/// multiplication would overflow `usize`.
fn old_padding_round_up_to_4(len: usize) -> Option<usize> {
    let bumped = len.checked_add(3)?;
    let words = bumped / 4;
    words.checked_mul(4)
}
/// Current spelling of "round `len` up to the next multiple of 4".
///
/// `div_ceil` avoids the `len + 3` overflow of the legacy form, but the final
/// multiplication back to bytes can still overflow and yield `None`.
fn new_padding_round_up_to_4(len: usize) -> Option<usize> {
    let words = len.div_ceil(4);
    words.checked_mul(4)
}
#[test]
fn padding_rounding_equivalent_for_extensive_safe_domain() {
    // Exhaustively compare both spellings over a large, safely representable
    // prefix of the domain and check the shared round-up invariants.
    let check = |len: usize| {
        let old = old_padding_round_up_to_4(len).expect("old expression must be safe");
        let new = new_padding_round_up_to_4(len).expect("new expression must be safe");
        assert_eq!(old, new, "mismatch for len={len}");
        assert!(new >= len, "rounded length must not shrink: len={len}, out={new}");
        assert_eq!(new % 4, 0, "rounded length must stay 4-byte aligned");
    };
    (0usize..=200_000).for_each(check);
}
#[test]
fn padding_rounding_equivalent_near_usize_limit_when_old_is_defined() {
    // Near the top of the usize range the legacy spelling may be undefined
    // (None); whenever it IS defined, the new spelling must agree exactly.
    let near_limit = [
        usize::MAX - 3,
        usize::MAX - 4,
        usize::MAX - 5,
        usize::MAX - 6,
        usize::MAX - 7,
        usize::MAX - 8,
        usize::MAX - 15,
        usize::MAX / 2,
        (usize::MAX / 2) + 1,
    ];
    for len in near_limit {
        if let Some(old_val) = old_padding_round_up_to_4(len) {
            assert_eq!(
                Some(old_val),
                new_padding_round_up_to_4(len),
                "safe-domain mismatch for len={len}"
            );
        }
    }
}
#[test]
fn padding_rounding_documents_overflow_boundary_behavior() {
    // For very large lengths, arithmetic round-up may overflow regardless of
    // spelling. This documents the boundary so future changes do not assume
    // universal safety.
    for len in [usize::MAX, usize::MAX - 1, usize::MAX - 2] {
        // Legacy form: `len + 3` itself overflows for these inputs.
        assert_eq!(old_padding_round_up_to_4(len), None);
    }
    // The div_ceil form avoids the `len + 3` overflow, but the final `* 4`
    // back to bytes can still overflow.
    for len in [usize::MAX, usize::MAX - 1] {
        assert_eq!(new_padding_round_up_to_4(len), None);
    }
}

View File

@ -8,6 +8,9 @@ pub mod state;
pub mod tls_stream; pub mod tls_stream;
pub mod traits; pub mod traits;
#[cfg(test)]
mod frame_stream_padding_security_tests;
// Legacy compatibility - will be removed later // Legacy compatibility - will be removed later
pub mod frame_stream; pub mod frame_stream;

View File

@ -154,7 +154,7 @@ impl TlsRecordHeader {
} }
TLS_RECORD_HANDSHAKE => { TLS_RECORD_HANDSHAKE => {
if len < 4 || len > MAX_TLS_PLAINTEXT_SIZE { if !(4..=MAX_TLS_PLAINTEXT_SIZE).contains(&len) {
return Err(Error::new( return Err(Error::new(
ErrorKind::InvalidData, ErrorKind::InvalidData,
format!( format!(

View File

@ -0,0 +1,114 @@
use std::net::{IpAddr, Ipv4Addr};
use std::sync::Arc;
use crate::ip_tracker::UserIpTracker;
/// Deterministic test address in 172.0.0.0/8: the low 24 bits of `idx` become
/// the last three octets, so distinct small indices map to distinct addresses.
fn ip_from_idx(idx: u32) -> IpAddr {
    // Big-endian bytes of idx are [top, b, c, d]; the top byte is discarded
    // and replaced by the fixed 172 prefix.
    let [_, b, c, d] = idx.to_be_bytes();
    IpAddr::V4(Ipv4Addr::new(172, b, c, d))
}
#[tokio::test]
async fn encapsulation_queue_len_helper_matches_enqueue_and_drain_lifecycle() {
    // The test-only queue-length helper must track the full cleanup lifecycle:
    // 32 after enqueueing 32 entries, 0 after a drain.
    let tracker = UserIpTracker::new();
    let user = "encap-len-user";
    for idx in 0..32 {
        tracker.enqueue_cleanup(user.to_string(), ip_from_idx(idx));
    }
    assert_eq!(
        tracker.cleanup_queue_len_for_tests(),
        32,
        "test helper must reflect queued cleanup entries before drain"
    );
    tracker.drain_cleanup_queue().await;
    assert_eq!(
        tracker.cleanup_queue_len_for_tests(),
        0,
        "cleanup queue must be empty after drain"
    );
}
#[tokio::test]
async fn encapsulation_repeated_queue_poison_recovery_preserves_forward_progress() {
    // Repeatedly poisons the cleanup-queue mutex (panic while holding the
    // guard) and verifies the tracker keeps admitting/releasing IPs: poison
    // recovery must never block forward progress for a limit-1 user.
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("encap-poison", 1).await;
    let ip_primary = ip_from_idx(10_001);
    let ip_alt = ip_from_idx(10_002);
    tracker.check_and_add("encap-poison", ip_primary).await.unwrap();
    for _ in 0..128 {
        // Intentionally poison the lock; catch_unwind keeps the test alive.
        let queue = tracker.cleanup_queue_mutex_for_tests();
        let _ = std::panic::catch_unwind(move || {
            let _guard = queue.lock().unwrap();
            panic!("intentional cleanup queue poison in encapsulation regression test");
        });
        tracker.enqueue_cleanup("encap-poison".to_string(), ip_primary);
        // enqueue_cleanup above released the primary slot, so the alternate IP
        // must be admissible despite the freshly poisoned mutex.
        assert!(
            tracker.check_and_add("encap-poison", ip_alt).await.is_ok(),
            "poison recovery must not block admission progress"
        );
        // Restore the starting state for the next iteration.
        tracker.remove_ip("encap-poison", ip_alt).await;
        tracker
            .check_and_add("encap-poison", ip_primary)
            .await
            .unwrap();
    }
}
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn encapsulation_parallel_poison_and_churn_maintains_queue_and_limit_invariants() {
    // 32 workers churn add/enqueue/drain on one shared tracker while a third
    // of iterations deliberately poison the cleanup-queue mutex. After the
    // storm, the queue must drain to empty and the per-user unique-IP count
    // must still respect the configured limit of 4.
    let tracker = Arc::new(UserIpTracker::new());
    tracker.set_user_limit("encap-stress", 4).await;
    let mut tasks = Vec::new();
    for worker in 0..32u32 {
        let t = tracker.clone();
        tasks.push(tokio::spawn(async move {
            let user = "encap-stress";
            // Each worker uses its own distinct IP.
            let ip = ip_from_idx(20_000 + worker);
            for iter in 0..64u32 {
                let _ = t.check_and_add(user, ip).await;
                t.enqueue_cleanup(user.to_string(), ip);
                if iter % 3 == 0 {
                    // Adversarial condition: poison the lock mid-churn.
                    let queue = t.cleanup_queue_mutex_for_tests();
                    let _ = std::panic::catch_unwind(move || {
                        let _guard = queue.lock().unwrap();
                        panic!("intentional lock poison during parallel stress");
                    });
                }
                t.drain_cleanup_queue().await;
            }
        }));
    }
    for task in tasks {
        task.await.expect("stress worker must not panic");
    }
    tracker.drain_cleanup_queue().await;
    assert_eq!(
        tracker.cleanup_queue_len_for_tests(),
        0,
        "queue must converge to empty after stress drain"
    );
    assert!(
        tracker.get_active_ip_count("encap-stress").await <= 4,
        "active unique IP count must remain bounded by configured limit"
    );
}

View File

@ -509,8 +509,9 @@ async fn enqueue_cleanup_recovers_from_poisoned_mutex() {
let ip = ip_from_idx(99); let ip = ip_from_idx(99);
// Poison the lock by panicking while holding it // Poison the lock by panicking while holding it
let result = std::panic::catch_unwind(|| { let cleanup_queue = tracker.cleanup_queue_mutex_for_tests();
let _guard = tracker.cleanup_queue.lock().unwrap(); let result = std::panic::catch_unwind(move || {
let _guard = cleanup_queue.lock().unwrap();
panic!("Intentional poison panic"); panic!("Intentional poison panic");
}); });
assert!(result.is_err(), "Expected panic to poison mutex"); assert!(result.is_err(), "Expected panic to poison mutex");
@ -612,8 +613,9 @@ async fn poisoned_cleanup_queue_still_releases_slot_for_next_ip() {
tracker.check_and_add("poison-slot", ip1).await.unwrap(); tracker.check_and_add("poison-slot", ip1).await.unwrap();
// Poison the queue lock as an adversarial condition. // Poison the queue lock as an adversarial condition.
let _ = std::panic::catch_unwind(|| { let cleanup_queue = tracker.cleanup_queue_mutex_for_tests();
let _guard = tracker.cleanup_queue.lock().unwrap(); let _ = std::panic::catch_unwind(move || {
let _guard = cleanup_queue.lock().unwrap();
panic!("intentional queue poison"); panic!("intentional queue poison");
}); });
@ -660,8 +662,9 @@ async fn stress_repeated_queue_poison_recovery_preserves_admission_progress() {
.unwrap(); .unwrap();
for _ in 0..64 { for _ in 0..64 {
let _ = std::panic::catch_unwind(|| { let cleanup_queue = tracker.cleanup_queue_mutex_for_tests();
let _guard = tracker.cleanup_queue.lock().unwrap(); let _ = std::panic::catch_unwind(move || {
let _guard = cleanup_queue.lock().unwrap();
panic!("intentional queue poison in stress loop"); panic!("intentional queue poison in stress loop");
}); });

View File

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments)]
use crate::crypto::{SecureRandom, sha256_hmac}; use crate::crypto::{SecureRandom, sha256_hmac};
use crate::protocol::constants::{ use crate::protocol::constants::{
MAX_TLS_CIPHERTEXT_SIZE, TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, MAX_TLS_CIPHERTEXT_SIZE, TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER,

View File

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments)]
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
@ -810,7 +812,8 @@ mod tests {
#[test] #[test]
fn test_encode_tls13_certificate_message_single_cert() { fn test_encode_tls13_certificate_message_single_cert() {
let cert = vec![0x30, 0x03, 0x02, 0x01, 0x01]; let cert = vec![0x30, 0x03, 0x02, 0x01, 0x01];
let message = encode_tls13_certificate_message(&[cert.clone()]).expect("message"); let message = encode_tls13_certificate_message(std::slice::from_ref(&cert))
.expect("message");
assert_eq!(message[0], 0x0b); assert_eq!(message[0], 0x0b);
assert_eq!(read_u24(&message[1..4]), message.len() - 4); assert_eq!(read_u24(&message[1..4]), message.len() - 4);

View File

@ -355,49 +355,49 @@ async fn run_update_cycle(
let mut ready_v4: Option<(ProxyConfigData, u64)> = None; let mut ready_v4: Option<(ProxyConfigData, u64)> = None;
let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await; let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await;
if let Some(cfg_v4) = cfg_v4 { if let Some(cfg_v4) = cfg_v4
if snapshot_passes_guards(cfg, &cfg_v4, "getProxyConfig") { && snapshot_passes_guards(cfg, &cfg_v4, "getProxyConfig")
let cfg_v4_hash = hash_proxy_config(&cfg_v4); {
let stable_hits = state.config_v4.observe(cfg_v4_hash); let cfg_v4_hash = hash_proxy_config(&cfg_v4);
if stable_hits < required_cfg_snapshots { let stable_hits = state.config_v4.observe(cfg_v4_hash);
debug!( if stable_hits < required_cfg_snapshots {
stable_hits, debug!(
required_cfg_snapshots, stable_hits,
snapshot = format_args!("0x{cfg_v4_hash:016x}"), required_cfg_snapshots,
"ME config v4 candidate observed" snapshot = format_args!("0x{cfg_v4_hash:016x}"),
); "ME config v4 candidate observed"
} else if state.config_v4.is_applied(cfg_v4_hash) { );
debug!( } else if state.config_v4.is_applied(cfg_v4_hash) {
snapshot = format_args!("0x{cfg_v4_hash:016x}"), debug!(
"ME config v4 stable snapshot already applied" snapshot = format_args!("0x{cfg_v4_hash:016x}"),
); "ME config v4 stable snapshot already applied"
} else { );
ready_v4 = Some((cfg_v4, cfg_v4_hash)); } else {
} ready_v4 = Some((cfg_v4, cfg_v4_hash));
} }
} }
let mut ready_v6: Option<(ProxyConfigData, u64)> = None; let mut ready_v6: Option<(ProxyConfigData, u64)> = None;
let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await; let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await;
if let Some(cfg_v6) = cfg_v6 { if let Some(cfg_v6) = cfg_v6
if snapshot_passes_guards(cfg, &cfg_v6, "getProxyConfigV6") { && snapshot_passes_guards(cfg, &cfg_v6, "getProxyConfigV6")
let cfg_v6_hash = hash_proxy_config(&cfg_v6); {
let stable_hits = state.config_v6.observe(cfg_v6_hash); let cfg_v6_hash = hash_proxy_config(&cfg_v6);
if stable_hits < required_cfg_snapshots { let stable_hits = state.config_v6.observe(cfg_v6_hash);
debug!( if stable_hits < required_cfg_snapshots {
stable_hits, debug!(
required_cfg_snapshots, stable_hits,
snapshot = format_args!("0x{cfg_v6_hash:016x}"), required_cfg_snapshots,
"ME config v6 candidate observed" snapshot = format_args!("0x{cfg_v6_hash:016x}"),
); "ME config v6 candidate observed"
} else if state.config_v6.is_applied(cfg_v6_hash) { );
debug!( } else if state.config_v6.is_applied(cfg_v6_hash) {
snapshot = format_args!("0x{cfg_v6_hash:016x}"), debug!(
"ME config v6 stable snapshot already applied" snapshot = format_args!("0x{cfg_v6_hash:016x}"),
); "ME config v6 stable snapshot already applied"
} else { );
ready_v6 = Some((cfg_v6, cfg_v6_hash)); } else {
} ready_v6 = Some((cfg_v6, cfg_v6_hash));
} }
} }

View File

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments)]
use std::collections::HashMap; use std::collections::HashMap;
use std::collections::HashSet; use std::collections::HashSet;
use std::net::SocketAddr; use std::net::SocketAddr;

View File

@ -1,3 +1,5 @@
#![allow(clippy::items_after_test_module)]
use std::collections::HashMap; use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use std::sync::Arc; use std::sync::Arc;
@ -12,6 +14,8 @@ use crate::transport::{UpstreamEgressInfo, UpstreamRouteKind};
use super::MePool; use super::MePool;
type MePingGroup = (MePingFamily, i32, Vec<(IpAddr, u16)>);
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MePingFamily { pub enum MePingFamily {
V4, V4,
@ -137,14 +141,14 @@ fn detect_interface_for_ip(ip: IpAddr) -> Option<String> {
if let Ok(addrs) = getifaddrs() { if let Ok(addrs) = getifaddrs() {
for iface in addrs { for iface in addrs {
if let Some(address) = iface.address { if let Some(address) = iface.address {
if let Some(v4) = address.as_sockaddr_in() { if let Some(v4) = address.as_sockaddr_in()
if IpAddr::V4(v4.ip()) == ip { && IpAddr::V4(v4.ip()) == ip
return Some(iface.interface_name); {
} return Some(iface.interface_name);
} else if let Some(v6) = address.as_sockaddr_in6() { } else if let Some(v6) = address.as_sockaddr_in6()
if IpAddr::V6(v6.ip()) == ip { && IpAddr::V6(v6.ip()) == ip
return Some(iface.interface_name); {
} return Some(iface.interface_name);
} }
} }
} }
@ -329,7 +333,7 @@ pub async fn run_me_ping(pool: &Arc<MePool>, rng: &SecureRandom) -> Vec<MePingRe
HashMap::new() HashMap::new()
}; };
let mut grouped: Vec<(MePingFamily, i32, Vec<(IpAddr, u16)>)> = Vec::new(); let mut grouped: Vec<MePingGroup> = Vec::new();
for (dc, addrs) in v4_map { for (dc, addrs) in v4_map {
grouped.push((MePingFamily::V4, dc, addrs)); grouped.push((MePingFamily::V4, dc, addrs));
} }

View File

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments, clippy::type_complexity)]
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::net::{IpAddr, Ipv6Addr, SocketAddr};
use std::sync::Arc; use std::sync::Arc;
@ -619,6 +621,7 @@ impl MePool {
self.runtime_ready.load(Ordering::Relaxed) self.runtime_ready.load(Ordering::Relaxed)
} }
#[allow(dead_code)]
pub(super) fn set_family_runtime_state( pub(super) fn set_family_runtime_state(
&self, &self,
family: IpFamily, family: IpFamily,
@ -982,28 +985,33 @@ impl MePool {
Some(Duration::from_secs(secs)) Some(Duration::from_secs(secs))
} }
#[allow(dead_code)]
pub(super) fn drain_soft_evict_enabled(&self) -> bool { pub(super) fn drain_soft_evict_enabled(&self) -> bool {
self.me_pool_drain_soft_evict_enabled self.me_pool_drain_soft_evict_enabled
.load(Ordering::Relaxed) .load(Ordering::Relaxed)
} }
#[allow(dead_code)]
pub(super) fn drain_soft_evict_grace_secs(&self) -> u64 { pub(super) fn drain_soft_evict_grace_secs(&self) -> u64 {
self.me_pool_drain_soft_evict_grace_secs self.me_pool_drain_soft_evict_grace_secs
.load(Ordering::Relaxed) .load(Ordering::Relaxed)
} }
#[allow(dead_code)]
pub(super) fn drain_soft_evict_per_writer(&self) -> usize { pub(super) fn drain_soft_evict_per_writer(&self) -> usize {
self.me_pool_drain_soft_evict_per_writer self.me_pool_drain_soft_evict_per_writer
.load(Ordering::Relaxed) .load(Ordering::Relaxed)
.max(1) as usize .max(1) as usize
} }
#[allow(dead_code)]
pub(super) fn drain_soft_evict_budget_per_core(&self) -> usize { pub(super) fn drain_soft_evict_budget_per_core(&self) -> usize {
self.me_pool_drain_soft_evict_budget_per_core self.me_pool_drain_soft_evict_budget_per_core
.load(Ordering::Relaxed) .load(Ordering::Relaxed)
.max(1) as usize .max(1) as usize
} }
#[allow(dead_code)]
pub(super) fn drain_soft_evict_cooldown(&self) -> Duration { pub(super) fn drain_soft_evict_cooldown(&self) -> Duration {
Duration::from_millis( Duration::from_millis(
self.me_pool_drain_soft_evict_cooldown_ms self.me_pool_drain_soft_evict_cooldown_ms

View File

@ -293,20 +293,20 @@ impl MePool {
WriterContour::Draining => "draining", WriterContour::Draining => "draining",
}; };
if !draining { if !draining
if let Some(dc_idx) = dc { && let Some(dc_idx) = dc
*live_writers_by_dc_endpoint {
.entry((dc_idx, endpoint)) *live_writers_by_dc_endpoint
.or_insert(0) += 1; .entry((dc_idx, endpoint))
*live_writers_by_dc.entry(dc_idx).or_insert(0) += 1; .or_insert(0) += 1;
if let Some(ema_ms) = rtt_ema_ms { *live_writers_by_dc.entry(dc_idx).or_insert(0) += 1;
let entry = dc_rtt_agg.entry(dc_idx).or_insert((0.0, 0)); if let Some(ema_ms) = rtt_ema_ms {
entry.0 += ema_ms; let entry = dc_rtt_agg.entry(dc_idx).or_insert((0.0, 0));
entry.1 += 1; entry.0 += ema_ms;
} entry.1 += 1;
if matches_active_generation && in_desired_map { }
*fresh_writers_by_dc.entry(dc_idx).or_insert(0) += 1; if matches_active_generation && in_desired_map {
} *fresh_writers_by_dc.entry(dc_idx).or_insert(0) += 1;
} }
} }

View File

@ -268,10 +268,10 @@ impl MePool {
cancel_reader_token.cancel(); cancel_reader_token.cancel();
} }
} }
if let Err(e) = res { if let Err(e) = res
if !idle_close_by_peer { && !idle_close_by_peer
warn!(error = %e, "ME reader ended"); {
} warn!(error = %e, "ME reader ended");
} }
let remaining = writers_arc.read().await.len(); let remaining = writers_arc.read().await.len();
debug!(writer_id, remaining, "ME reader task finished"); debug!(writer_id, remaining, "ME reader task finished");
@ -386,10 +386,9 @@ impl MePool {
if cleanup_for_ping if cleanup_for_ping
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok() .is_ok()
&& let Some(pool) = pool_ping.upgrade()
{ {
if let Some(pool) = pool_ping.upgrade() { pool.remove_writer_and_close_clients(writer_id).await;
pool.remove_writer_and_close_clients(writer_id).await;
}
} }
break; break;
} }
@ -538,6 +537,7 @@ impl MePool {
.await .await
} }
#[allow(dead_code)]
async fn remove_writer_only(self: &Arc<Self>, writer_id: u64) -> bool { async fn remove_writer_only(self: &Arc<Self>, writer_id: u64) -> bool {
self.remove_writer_with_mode(writer_id, WriterTeardownMode::Any) self.remove_writer_with_mode(writer_id, WriterTeardownMode::Any)
.await .await

View File

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments)]
use std::collections::HashMap; use std::collections::HashMap;
use std::io::ErrorKind; use std::io::ErrorKind;
use std::sync::Arc; use std::sync::Arc;

View File

@ -165,6 +165,7 @@ impl ConnRegistry {
None None
} }
#[allow(dead_code)]
pub async fn route(&self, id: u64, resp: MeResponse) -> RouteResult { pub async fn route(&self, id: u64, resp: MeResponse) -> RouteResult {
let tx = { let tx = {
let inner = self.inner.read().await; let inner = self.inner.read().await;
@ -438,6 +439,7 @@ impl ConnRegistry {
.unwrap_or(true) .unwrap_or(true)
} }
#[allow(dead_code)]
pub async fn unregister_writer_if_empty(&self, writer_id: u64) -> bool { pub async fn unregister_writer_if_empty(&self, writer_id: u64) -> bool {
let mut inner = self.inner.write().await; let mut inner = self.inner.write().await;
let Some(conn_ids) = inner.conns_for_writer.get(&writer_id) else { let Some(conn_ids) = inner.conns_for_writer.get(&writer_id) else {

View File

@ -1,3 +1,5 @@
#![allow(clippy::too_many_arguments)]
use std::cmp::Reverse; use std::cmp::Reverse;
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::net::SocketAddr; use std::net::SocketAddr;
@ -593,7 +595,7 @@ impl MePool {
let round = *hybrid_recovery_round; let round = *hybrid_recovery_round;
let target_triggered = self.trigger_async_recovery_for_target_dc(routed_dc).await; let target_triggered = self.trigger_async_recovery_for_target_dc(routed_dc).await;
if !target_triggered || round % HYBRID_GLOBAL_BURST_PERIOD_ROUNDS == 0 { if !target_triggered || round.is_multiple_of(HYBRID_GLOBAL_BURST_PERIOD_ROUNDS) {
self.trigger_async_recovery_global().await; self.trigger_async_recovery_global().await;
} }
*hybrid_recovery_round = round.saturating_add(1); *hybrid_recovery_round = round.saturating_add(1);
@ -672,7 +674,7 @@ impl MePool {
if !self.writer_eligible_for_selection(w, include_warm) { if !self.writer_eligible_for_selection(w, include_warm) {
continue; continue;
} }
if w.writer_dc == routed_dc && preferred.iter().any(|endpoint| *endpoint == w.addr) { if w.writer_dc == routed_dc && preferred.contains(&w.addr) {
out.push(idx); out.push(idx);
} }
} }

View File

@ -199,8 +199,12 @@ impl ConnectionPool {
/// Close all pooled connections /// Close all pooled connections
pub async fn close_all(&self) { pub async fn close_all(&self) {
let pools = self.pools.read(); let pools_snapshot: Vec<(SocketAddr, Arc<Mutex<PoolInner>>)> = {
for (addr, pool) in pools.iter() { let pools = self.pools.read();
pools.iter().map(|(addr, pool)| (*addr, Arc::clone(pool))).collect()
};
for (addr, pool) in pools_snapshot {
let mut inner = pool.lock().await; let mut inner = pool.lock().await;
let count = inner.connections.len(); let count = inner.connections.len();
inner.connections.clear(); inner.connections.clear();
@ -210,12 +214,15 @@ impl ConnectionPool {
/// Get pool statistics /// Get pool statistics
pub async fn stats(&self) -> PoolStats { pub async fn stats(&self) -> PoolStats {
let pools = self.pools.read(); let pools_snapshot: Vec<Arc<Mutex<PoolInner>>> = {
let pools = self.pools.read();
pools.values().cloned().collect()
};
let mut total_connections = 0; let mut total_connections = 0;
let mut total_pending = 0; let mut total_pending = 0;
let mut endpoints = 0; let mut endpoints = 0;
for pool in pools.values() { for pool in pools_snapshot {
let inner = pool.lock().await; let inner = pool.lock().await;
total_connections += inner.connections.len(); total_connections += inner.connections.len();
total_pending += inner.pending; total_pending += inner.pending;