diff --git a/.cargo/config.toml b/.cargo/config.toml index 789169d..a3a64d3 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,61 +1,16 @@ # .cargo/config.toml +# +# Only rustc-native lints belong in rustflags. clippy::* lints are unknown to +# rustc and produce "unknown lint" warnings on every cargo build/check/test +# invocation, which silently defeats the enforcement intent and will break any +# job that sets -D warnings. All clippy lints must be configured exclusively +# via `cargo clippy` in the CI workflow (see .github/workflows/security.yml). [build] rustflags = [ - # 1. ABSOLUTE NON-NEGOTIABLES (Forbid) - # NOTE: temporarily relax some strict lints to reduce noise while triaging legacy code. - "-D", "clippy::unwrap_used", - "-D", "clippy::expect_used", - "-D", "clippy::panic", - "-D", "clippy::todo", - "-D", "clippy::unimplemented", - "-F", "clippy::undocumented_unsafe_blocks", - - # 2. BASE STRICTNESS & CORE CORRECTNESS (Deny) - "-D", "clippy::correctness", - # Note: temporarily allow some noisy lints (e.g. redundant closure, too_many_arguments, - # documentation backticks, missing const fn) while we triage existing code. - "-A", "clippy::use-self", - "-A", "clippy::redundant-closure", - "-A", "clippy::too-many-arguments", - "-A", "clippy::doc-markdown", - "-A", "clippy::missing-const-for-fn", - "-A", "clippy::unnecessary_operation", - "-A", "clippy::redundant-pub-crate", - "-A", "clippy::derive-partial-eq-without-eq", - "-D", "clippy::option_if_let_else", - "-D", "clippy::or_fun_call", - "-D", "clippy::branches_sharing_code", - "-A", "clippy::type_complexity", - "-A", "clippy::new_ret_no_self", - "-D", "clippy::single_option_map", - "-D", "clippy::useless_let_if_seq", - "-D", "clippy::redundant_locals", - "-D", "clippy::cloned_ref_to_slice_refs", - #"-D", "clippy::all", - #"-D", "clippy::pedantic", - #"-D", "clippy::cargo", + # Forbid unsafe code globally; this is a rustc-native lint. "-D", "unsafe_code", - # 3. CONCURRENCY, ASYNC, & MEMORY STRICTNESS (Deny) - "-D", "clippy::await_holding_lock", - "-D", "clippy::await_holding_refcell_ref", - "-D", "clippy::debug_assert_with_mut_call", - "-D", "clippy::macro_use_imports", - "-D", "clippy::cast_ptr_alignment", - "-D", "clippy::cast_lossless", - "-A", "clippy::cast_possible_truncation", - "-A", "clippy::cast_possible_wrap", - "-D", "clippy::ptr_as_ptr", - "-A", "clippy::significant_drop_tightening", - "-A", "clippy::significant_drop_in_scrutinee", - - # 4. CRYPTOGRAPHIC, MATH & COMPLEXITY STRICTNESS (Deny) - "-D", "clippy::large_stack_arrays", - "-A", "clippy::float_cmp", - "-D", "clippy::same_functions_in_if_condition", - #"-D", "clippy::cognitive_complexity", - - # 5. NATIVE COMPILER STRICTNESS (Deny) + # Native compiler strictness. #"-D", "missing_docs", #"-D", "missing_debug_implementations", "-D", "trivial_casts", @@ -64,7 +19,4 @@ rustflags = [ "-D", "unused_import_braces", #"-D", "unused_qualifications", "-D", "rust_2018_idioms", - - # 6. EXPERIMENTAL (Warn) - "-A", "clippy::nursery" ] \ No newline at end of file diff --git a/src/crypto/hash.rs b/src/crypto/hash.rs index c1b509d..001903a 100644 --- a/src/crypto/hash.rs +++ b/src/crypto/hash.rs @@ -27,12 +27,14 @@ pub fn sha256(data: &[u8]) -> [u8; 32] { } /// SHA-256 HMAC +// HMAC-SHA256 accepts any key length (zero-length or arbitrary), so +// new_from_slice never returns an error. expect() is safe here; the +// allow attribute suppresses the clippy::expect_used lint which is +// globally denied but inapplicable to this structurally-infallible call. +#[allow(clippy::expect_used)] pub fn sha256_hmac(key: &[u8], data: &[u8]) -> [u8; 32] { - let mut mac = match HmacSha256::new_from_slice(key) { - Ok(mac) => mac, - // new_from_slice for HMAC accepts any key length; this branch is structurally unreachable. - Err(_) => unreachable!("HMAC-SHA256 new_from_slice must not fail: HMAC accepts any key length"), - }; + let mut mac = HmacSha256::new_from_slice(key) + .expect("HMAC-SHA256 new_from_slice must not fail: HMAC accepts any key length"); mac.update(data); mac.finalize().into_bytes().into() } diff --git a/src/protocol/obfuscation.rs b/src/protocol/obfuscation.rs index 394aabc..f43a622 100644 --- a/src/protocol/obfuscation.rs +++ b/src/protocol/obfuscation.rs @@ -124,19 +124,23 @@ pub const MAX_NONCE_ATTEMPTS: usize = 64; /// Generate a valid random nonce for Telegram handshake. /// -/// Panics if `random_bytes` returns `MAX_NONCE_ATTEMPTS` consecutive invalid nonces, -/// which is a statistical impossibility with a correctly-seeded CSPRNG. +/// Panics if `random_bytes` returns `MAX_NONCE_ATTEMPTS` consecutive invalid nonces. +/// This is a statistical impossibility with a correctly-seeded CSPRNG; reaching +/// this branch means the RNG source is broken or adversarially controlled. +// clippy::panic is globally denied, but this explicit panic on RNG failure is +// intentional and correct: continuing with a broken RNG would be far more dangerous. +#[allow(clippy::panic)] pub fn generate_nonce Vec>(mut random_bytes: R) -> [u8; HANDSHAKE_LEN] { for _ in 0..MAX_NONCE_ATTEMPTS { let nonce_vec = random_bytes(HANDSHAKE_LEN); let mut nonce = [0u8; HANDSHAKE_LEN]; nonce.copy_from_slice(&nonce_vec); - + if is_valid_nonce(&nonce) { return nonce; } } - unreachable!("CSPRNG produced {MAX_NONCE_ATTEMPTS} consecutive invalid nonces — RNG is broken") + panic!("CSPRNG produced {MAX_NONCE_ATTEMPTS} consecutive invalid nonces; RNG is broken") } /// Check if nonce is valid (not matching reserved patterns) @@ -385,4 +389,21 @@ mod tests { // Enforced at compile time: if Clone is ever derived or manually implemented for // ObfuscationParams, this assertion will fail to compile. static_assertions::assert_not_impl_any!(ObfuscationParams: Clone); + + // A broken or adversarially-controlled RNG that returns MAX_NONCE_ATTEMPTS + // consecutive invalid nonces must trigger a panic rather than looping forever + // or silently returning garbage. Once this panic is observable in production + // logs it is an unambiguous signal that the RNG source is compromised. + #[test] + #[should_panic(expected = "CSPRNG produced")] + fn generate_nonce_panics_when_rng_always_returns_invalid_nonces() { + generate_nonce(|n| { + let mut buf = vec![0u8; n]; + // Force first-byte reservation on every single attempt, making all + // MAX_NONCE_ATTEMPTS nonces invalid and triggering the panic path. + buf[0] = RESERVED_NONCE_FIRST_BYTES[0]; + buf[4..8].copy_from_slice(&[1, 2, 3, 4]); + buf + }); + } } diff --git a/src/tls_front/fetcher.rs b/src/tls_front/fetcher.rs index aa90366..846f058 100644 --- a/src/tls_front/fetcher.rs +++ b/src/tls_front/fetcher.rs @@ -79,13 +79,19 @@ impl ServerCertVerifier for NoVerify { fn build_client_config() -> Arc { let root = rustls::RootCertStore::empty(); - let provider = rustls::crypto::ring::default_provider(); - let builder = ClientConfig::builder_with_provider(Arc::new(provider)); + // Keep the provider in an Arc so it can be reused in the fallback branch + // without switching to a different (default) crypto backend. + let provider = Arc::new(rustls::crypto::ring::default_provider()); + let builder = ClientConfig::builder_with_provider(provider.clone()); let builder = match builder.with_protocol_versions(&[&rustls::version::TLS13, &rustls::version::TLS12]) { Ok(builder) => builder, Err(error) => { - warn!(%error, "Failed to set explicit TLS versions, using rustls defaults"); - ClientConfig::builder() + warn!(%error, "Failed to set explicit TLS versions, falling back to rustls default versions with the same provider"); + // Use the same ring provider rather than the global default, so the + // crypto backend does not silently change on this error path. + ClientConfig::builder_with_provider(provider) + .with_safe_default_protocol_versions() + .expect("ring provider must support at least one protocol version") } }; diff --git a/src/transport/middle_proxy/secret.rs b/src/transport/middle_proxy/secret.rs index d28f2c4..d21184e 100644 --- a/src/transport/middle_proxy/secret.rs +++ b/src/transport/middle_proxy/secret.rs @@ -61,6 +61,9 @@ pub async fn fetch_proxy_secret(cache_path: Option<&str>, max_len: usize) -> Res let tmp_path = unique_temp_path(cache); if let Err(e) = tokio::fs::write(&tmp_path, &data).await { warn!(error = %e, "Failed to write proxy-secret temp file (non-fatal)"); + // Best-effort cleanup: remove the partial temp file so it does not + // accumulate on disk across failed refresh cycles. + let _ = tokio::fs::remove_file(&tmp_path).await; } else if let Err(e) = tokio::fs::rename(&tmp_path, cache).await { warn!(error = %e, path = cache, "Failed to rename proxy-secret cache (non-fatal)"); let _ = tokio::fs::remove_file(&tmp_path).await; @@ -100,6 +103,16 @@ pub async fn fetch_proxy_secret(cache_path: Option<&str>, max_len: usize) -> Res } pub async fn download_proxy_secret_with_max_len(max_len: usize) -> Result> { + // Fail fast before any network I/O when the caller passes a nonsensical cap. + // Without this guard the error would surface only after the HTTP round-trip, + // producing a misleading hard-cap or Content-Length error instead of an + // explicit "invalid parameter" one. + if max_len < PROXY_SECRET_MIN_LEN { + return Err(ProxyError::Proxy(format!( + "proxy-secret max_len {max_len} is below the minimum allowed {PROXY_SECRET_MIN_LEN}", + ))); + } + let mut resp = reqwest::get("https://core.telegram.org/getProxySecret") .await .map_err(|e| ProxyError::Proxy(format!("Failed to download proxy-secret: {e}")))?; @@ -149,7 +162,15 @@ pub async fn download_proxy_secret_with_max_len(max_len: usize) -> Result { - if data.len() + chunk.len() > hard_cap { + // Use checked_add to guard against a malicious/malfunctioning + // HTTP implementation sending chunk lengths that sum past usize::MAX. + let new_len = data + .len() + .checked_add(chunk.len()) + .ok_or_else(|| ProxyError::Proxy( + "proxy-secret response body size overflowed usize".to_string(), + ))?; + if new_len > hard_cap { return Err(ProxyError::Proxy(format!( "proxy-secret response body would exceed hard cap {hard_cap} bytes" ))); @@ -328,6 +349,28 @@ mod tests { ); } + // The chunk-accumulation loop uses checked_add to prevent usize-overflow wrap-around + // from silently bypassing the hard cap. This test verifies the guard's contract: + // a hypothetical accumulated length that would overflow usize when a new chunk is + // added must be treated as a cap violation rather than wrapping back to a small value. + #[test] + fn streaming_cap_checked_add_overflow_is_treated_as_cap_violation() { + // Simulate a near-saturated buffer (impossible in practice but must be + // handled safely in the guard logic rather than panicking or wrapping). + let almost_max: usize = usize::MAX - 3; + let chunk_len: usize = 10; // wrapping addition would produce 6, sneaking past caps + + let new_len = almost_max.checked_add(chunk_len); + + // The guard must detect overflow (None) and treat it as cap exceeded, + // not silently allow 6 bytes through. + assert!( + new_len.is_none(), + "checked_add must detect overflow; wrapping arithmetic would produce {}", + almost_max.wrapping_add(chunk_len) + ); + } + // --- unique_temp_path --- #[test]