Mirror of https://github.com/telemt/telemt.git (synced 2026-04-18 11:04:09 +03:00)

Compare commits: 29 commits

37a31c13cb, 35bca7d4cc, f39d317d93, d4d93aabf5, c9271d9083, 9c9ba4becd, bd0cefdb12, e2ed1eb286, a74def9561, 95c1306166, e1ef192c10, ee4d15fed6, 0040e9b6da, 2c10560795, 5eff38eb82, b6206a6dfe, 4d8a5ca174, 0ae67db492, c4f77814ee, 92972ab6bf, c351e08c43, e29855c8c6, 3634fbd7e8, bb29797bfb, 3d5af3d248, 2d7df3da6c, 4abc0e5134, 4028579068, 58f26ba8a7
Cargo.lock (generated, 2 lines changed):

```diff
@@ -2087,7 +2087,7 @@ dependencies = [
 
 [[package]]
 name = "telemt"
-version = "3.3.15"
+version = "3.3.19"
 dependencies = [
  "aes",
  "anyhow",
```
Cargo.toml:

```diff
@@ -1,6 +1,6 @@
 [package]
 name = "telemt"
-version = "3.3.18"
+version = "3.3.20"
 edition = "2024"
 
 [dependencies]
```
README release notes (the Russian section is translated here; the original keeps parallel RU/EN text):

```diff
@@ -19,9 +19,9 @@
 
 ### 🇷🇺 RU
 
-#### Release 3.3.16
+#### Release 3.3.15 Semistable
 
-[3.3.16](https://github.com/telemt/telemt/releases/tag/3.3.16)!
+[3.3.15](https://github.com/telemt/telemt/releases/tag/3.3.15) has, based on its production track record, been recognized as one of the most stable releases and is recommended for use when cutting-edge features are not critical!
 
 We would be glad to hear your feedback and improvement suggestions, especially around the **API**, **statistics**, and **UX**
 
@@ -40,9 +40,9 @@
 
 ### 🇬🇧 EN
 
-#### Release 3.3.16
+#### Release 3.3.15 Semistable
 
-[3.3.16](https://github.com/telemt/telemt/releases/tag/3.3.16)
+[3.3.15](https://github.com/telemt/telemt/releases/tag/3.3.15) has, based on its production track record, been recognized as one of the most stable releases and is recommended for use when cutting-edge features are not critical!
 
 We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**
 
```
Usage documentation (English):

````diff
@@ -32,6 +32,7 @@ show = "*"
 port = 443
 # proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol
 # metrics_port = 9090
+# metrics_listen = "0.0.0.0:9090" # Listen address for metrics (overrides metrics_port)
 # metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"]
 
 [server.api]
@@ -55,7 +55,10 @@ user2 = "00000000000000000000000000000002"
 user3 = "00000000000000000000000000000003"
 ```
 4. Save the config. Ctrl+S -> Ctrl+X. You don't need to restart telemt.
-5. Get the links via `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
+5. Get the links via
+```bash
+curl -s http://127.0.0.1:9091/v1/users | jq
+```
 
 ## How to view metrics
 
@@ -80,6 +83,13 @@ To specify a domain in the links, add to the `[general.links]` section of the co
 public_host = "proxy.example.com"
 ```
 
+### Server connection limit
+Limits the total number of open connections to the server:
+```toml
+[server]
+max_connections = 10000 # 0 - unlimited, 10000 - default
+```
+
 ### Upstream Manager
 To specify an upstream, add to the `[[upstreams]]` section of the config.toml file:
 #### Binding to IP
````
Usage documentation (Russian counterpart, translated):

````diff
@@ -55,7 +55,10 @@ user2 = "00000000000000000000000000000002"
 user3 = "00000000000000000000000000000003"
 ```
 4. Save the config. Ctrl+S -> Ctrl+X. There is no need to restart telemt.
-5. Get the links via `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
+5. Get the links via
+```bash
+curl -s http://127.0.0.1:9091/v1/users | jq
+```
 
 ## How to view metrics
 
@@ -80,6 +83,13 @@ metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
 public_host = "proxy.example.com"
 ```
 
+### Overall server connection limit
+Limits the total number of open connections to the server:
+```toml
+[server]
+max_connections = 10000 # 0 - unlimited, 10000 - default
+```
+
 ### Upstream Manager
 To specify an upstream, add to the `[[upstreams]]` section of the config.toml file:
 #### Binding to IP
@@ -110,3 +120,4 @@ password = "pass" # Password for Auth on SOCKS-server
 weight = 1 # Set Weight for Scenarios
 enabled = true
 ```
+
````
Example configuration (first copy):

```diff
@@ -72,6 +72,9 @@ classic = false
 secure = false
 tls = true
 
+[server]
+port = 443
+
 [server.api]
 enabled = true
 # listen = "127.0.0.1:9091"
```
Example configuration (second copy, same change):

```diff
@@ -72,6 +72,9 @@ classic = false
 secure = false
 tls = true
 
+[server]
+port = 443
+
 [server.api]
 enabled = true
 # listen = "127.0.0.1:9091"
```
Censorship-evasion documentation (Russian prose translated):

````diff
@@ -38,8 +38,9 @@ umweltschutz.de -> A record 198.18.88.88
 
 In the Telemt configuration:
 
-```
-tls_domain = umweltschutz.de
+```toml
+[censorship]
+tls_domain = "umweltschutz.de"
 ```
 
 This domain is used by the client as the SNI in the ClientHello
@@ -56,8 +57,9 @@ tls_domain = umweltschutz.de
 
 In the Telemt configuration:
 
-```
-mask_host = 127.0.0.1
+```toml
+[censorship]
+mask_host = "127.0.0.1"
 mask_port = 8443
 ```
@@ -151,16 +153,18 @@ mask_host:mask_port
 
 For example:
 
-```
-tls_domain = github.com
-mask_host = github.com
+```toml
+[censorship]
+tls_domain = "github.com"
+mask_host = "github.com"
 mask_port = 443
 ```
 
 or
 
-```
-mask_host = 140.82.121.4
+```toml
+[censorship]
+mask_host = "140.82.121.4"
 ```
 
 In this case:
````
API: the effective-limits payload gains the global per-user unique-IP limit:

```diff
@@ -90,6 +90,7 @@ pub(super) struct EffectiveMiddleProxyLimits {
 
 #[derive(Serialize)]
 pub(super) struct EffectiveUserIpPolicyLimits {
+    pub(super) global_each: usize,
     pub(super) mode: &'static str,
     pub(super) window_secs: u64,
 }
@@ -262,6 +263,7 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
         me2dc_fallback: cfg.general.me2dc_fallback,
     },
     user_ip_policy: EffectiveUserIpPolicyLimits {
+        global_each: cfg.access.user_max_unique_ips_global_each,
         mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
         window_secs: cfg.access.user_max_unique_ips_window_secs,
     },
```
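Since `EffectiveUserIpPolicyLimits` derives `Serialize`, the new field surfaces in the API's effective-limits response. A standalone sketch of the resulting JSON shape, assuming `serde` and `serde_json` are available; the `"active_window"` mode label is an illustrative guess (the repo derives it via `user_max_unique_ips_mode_label`):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct EffectiveUserIpPolicyLimits {
    global_each: usize,
    mode: &'static str,
    window_secs: u64,
}

fn main() {
    let policy = EffectiveUserIpPolicyLimits {
        global_each: 5,
        mode: "active_window", // illustrative label only
        window_secs: 30,
    };
    // Prints: {"global_each":5,"mode":"active_window","window_secs":30}
    println!("{}", serde_json::to_string(&policy).unwrap());
}
```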
API: the per-user listing now inherits the global unique-IP limit when no positive per-user override exists:

```diff
@@ -386,7 +386,16 @@ pub(super) async fn users_from_config(
     .get(&username)
     .map(chrono::DateTime::<chrono::Utc>::to_rfc3339),
 data_quota_bytes: cfg.access.user_data_quota.get(&username).copied(),
-max_unique_ips: cfg.access.user_max_unique_ips.get(&username).copied(),
+max_unique_ips: cfg
+    .access
+    .user_max_unique_ips
+    .get(&username)
+    .copied()
+    .filter(|limit| *limit > 0)
+    .or(
+        (cfg.access.user_max_unique_ips_global_each > 0)
+            .then_some(cfg.access.user_max_unique_ips_global_each),
+    ),
 current_connections: stats.get_user_curr_connects(&username),
 active_unique_ips: active_ip_list.len(),
 active_unique_ips_list: active_ip_list,
```
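The lookup above implements a fallback chain: an explicit per-user limit greater than zero wins; otherwise a positive `user_max_unique_ips_global_each` is inherited; otherwise the user is unlimited (`None`). A minimal runnable sketch of the same precedence, with illustrative names that are not from the repo:

```rust
/// Resolve the effective unique-IP limit: a per-user override (> 0) wins,
/// then a positive global default applies; `None` means unlimited.
fn effective_limit(per_user: Option<usize>, global_each: usize) -> Option<usize> {
    per_user
        .filter(|limit| *limit > 0)
        .or((global_each > 0).then_some(global_each))
}

fn main() {
    assert_eq!(effective_limit(Some(5), 10), Some(5));  // explicit override wins
    assert_eq!(effective_limit(Some(0), 10), Some(10)); // zero override inherits global
    assert_eq!(effective_limit(None, 10), Some(10));    // no override: global applies
    assert_eq!(effective_limit(None, 0), None);         // both unset: unlimited
}
```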
Config defaults:

```diff
@@ -147,6 +147,10 @@ pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
     500
 }
 
+pub(crate) fn default_server_max_connections() -> u32 {
+    10_000
+}
+
 pub(crate) fn default_prefer_4() -> u8 {
     4
 }
@@ -584,6 +588,10 @@ pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
     90
 }
 
+pub(crate) fn default_me_pool_drain_threshold() -> u64 {
+    128
+}
+
 pub(crate) fn default_me_bind_stale_ttl_secs() -> u64 {
     default_me_pool_drain_ttl_secs()
 }
@@ -635,6 +643,10 @@ pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
     DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
 }
 
+pub(crate) fn default_user_max_unique_ips_global_each() -> usize {
+    0
+}
+
 // Custom deserializer helpers
 
 #[derive(Deserialize)]
```
Hot-reload fields and change logging:

```diff
@@ -55,6 +55,7 @@ pub struct HotFields {
     pub me_reinit_coalesce_window_ms: u64,
     pub hardswap: bool,
     pub me_pool_drain_ttl_secs: u64,
+    pub me_pool_drain_threshold: u64,
     pub me_pool_min_fresh_ratio: f32,
     pub me_reinit_drain_timeout_secs: u64,
     pub me_hardswap_warmup_delay_min_ms: u64,
@@ -118,6 +119,7 @@ pub struct HotFields {
     pub user_expirations: std::collections::HashMap<String, chrono::DateTime<chrono::Utc>>,
     pub user_data_quota: std::collections::HashMap<String, u64>,
     pub user_max_unique_ips: std::collections::HashMap<String, usize>,
+    pub user_max_unique_ips_global_each: usize,
     pub user_max_unique_ips_mode: crate::config::UserMaxUniqueIpsMode,
     pub user_max_unique_ips_window_secs: u64,
 }
@@ -135,6 +137,7 @@ impl HotFields {
     me_reinit_coalesce_window_ms: cfg.general.me_reinit_coalesce_window_ms,
     hardswap: cfg.general.hardswap,
     me_pool_drain_ttl_secs: cfg.general.me_pool_drain_ttl_secs,
+    me_pool_drain_threshold: cfg.general.me_pool_drain_threshold,
     me_pool_min_fresh_ratio: cfg.general.me_pool_min_fresh_ratio,
     me_reinit_drain_timeout_secs: cfg.general.me_reinit_drain_timeout_secs,
     me_hardswap_warmup_delay_min_ms: cfg.general.me_hardswap_warmup_delay_min_ms,
@@ -232,6 +235,7 @@ impl HotFields {
     user_expirations: cfg.access.user_expirations.clone(),
     user_data_quota: cfg.access.user_data_quota.clone(),
     user_max_unique_ips: cfg.access.user_max_unique_ips.clone(),
+    user_max_unique_ips_global_each: cfg.access.user_max_unique_ips_global_each,
     user_max_unique_ips_mode: cfg.access.user_max_unique_ips_mode,
     user_max_unique_ips_window_secs: cfg.access.user_max_unique_ips_window_secs,
 }
@@ -450,6 +454,7 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
     cfg.general.me_reinit_coalesce_window_ms = new.general.me_reinit_coalesce_window_ms;
     cfg.general.hardswap = new.general.hardswap;
     cfg.general.me_pool_drain_ttl_secs = new.general.me_pool_drain_ttl_secs;
+    cfg.general.me_pool_drain_threshold = new.general.me_pool_drain_threshold;
     cfg.general.me_pool_min_fresh_ratio = new.general.me_pool_min_fresh_ratio;
     cfg.general.me_reinit_drain_timeout_secs = new.general.me_reinit_drain_timeout_secs;
     cfg.general.me_hardswap_warmup_delay_min_ms = new.general.me_hardswap_warmup_delay_min_ms;
@@ -532,6 +537,7 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
     cfg.access.user_expirations = new.access.user_expirations.clone();
     cfg.access.user_data_quota = new.access.user_data_quota.clone();
     cfg.access.user_max_unique_ips = new.access.user_max_unique_ips.clone();
+    cfg.access.user_max_unique_ips_global_each = new.access.user_max_unique_ips_global_each;
     cfg.access.user_max_unique_ips_mode = new.access.user_max_unique_ips_mode;
     cfg.access.user_max_unique_ips_window_secs = new.access.user_max_unique_ips_window_secs;
 
@@ -823,6 +829,13 @@ fn log_changes(
     );
 }
 
+if old_hot.me_pool_drain_threshold != new_hot.me_pool_drain_threshold {
+    info!(
+        "config reload: me_pool_drain_threshold: {} → {}",
+        old_hot.me_pool_drain_threshold, new_hot.me_pool_drain_threshold,
+    );
+}
+
 if (old_hot.me_pool_min_fresh_ratio - new_hot.me_pool_min_fresh_ratio).abs() > f32::EPSILON {
     info!(
         "config reload: me_pool_min_fresh_ratio: {:.3} → {:.3}",
@@ -1099,12 +1112,14 @@ fn log_changes(
         new_hot.user_max_unique_ips.len()
     );
 }
-if old_hot.user_max_unique_ips_mode != new_hot.user_max_unique_ips_mode
+if old_hot.user_max_unique_ips_global_each != new_hot.user_max_unique_ips_global_each
+    || old_hot.user_max_unique_ips_mode != new_hot.user_max_unique_ips_mode
     || old_hot.user_max_unique_ips_window_secs
         != new_hot.user_max_unique_ips_window_secs
 {
     info!(
-        "config reload: user_max_unique_ips policy mode={:?} window={}s",
+        "config reload: user_max_unique_ips policy global_each={} mode={:?} window={}s",
+        new_hot.user_max_unique_ips_global_each,
         new_hot.user_max_unique_ips_mode,
         new_hot.user_max_unique_ips_window_secs
     );
```
Config types:

```diff
@@ -3,6 +3,7 @@ use ipnetwork::IpNetwork;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;
 use std::net::IpAddr;
+use std::path::PathBuf;
 
 use super::defaults::*;
 
@@ -356,6 +357,9 @@ impl Default for NetworkConfig {
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct GeneralConfig {
+    #[serde(default)]
+    pub data_path: Option<PathBuf>,
+
     #[serde(default)]
     pub modes: ProxyModes,
 
@@ -794,6 +798,11 @@ pub struct GeneralConfig {
     #[serde(default = "default_me_pool_drain_ttl_secs")]
     pub me_pool_drain_ttl_secs: u64,
 
+    /// Maximum allowed number of draining ME writers before oldest ones are force-closed in batches.
+    /// Set to 0 to disable threshold-based draining cleanup and keep timeout-only behavior.
+    #[serde(default = "default_me_pool_drain_threshold")]
+    pub me_pool_drain_threshold: u64,
+
     /// Policy for new binds on stale draining writers.
     #[serde(default)]
     pub me_bind_stale_mode: MeBindStaleMode,
@@ -866,6 +875,7 @@ pub struct GeneralConfig {
 impl Default for GeneralConfig {
     fn default() -> Self {
         Self {
+            data_path: None,
             modes: ProxyModes::default(),
             prefer_ipv6: false,
             fast_mode: default_true(),
@@ -973,6 +983,7 @@ impl Default for GeneralConfig {
     me_secret_atomic_snapshot: default_me_secret_atomic_snapshot(),
     proxy_secret_len_max: default_proxy_secret_len_max(),
     me_pool_drain_ttl_secs: default_me_pool_drain_ttl_secs(),
+    me_pool_drain_threshold: default_me_pool_drain_threshold(),
     me_bind_stale_mode: MeBindStaleMode::default(),
     me_bind_stale_ttl_secs: default_me_bind_stale_ttl_secs(),
     me_pool_min_fresh_ratio: default_me_pool_min_fresh_ratio(),
@@ -1145,9 +1156,17 @@ pub struct ServerConfig {
     #[serde(default = "default_proxy_protocol_header_timeout_ms")]
     pub proxy_protocol_header_timeout_ms: u64,
 
+    /// Port for the Prometheus-compatible metrics endpoint.
+    /// Enables metrics when set; binds on all interfaces (dual-stack) by default.
     #[serde(default)]
     pub metrics_port: Option<u16>,
 
+    /// Listen address for metrics in `IP:PORT` format (e.g. `"127.0.0.1:9090"`).
+    /// When set, takes precedence over `metrics_port` and binds on the specified address only.
+    #[serde(default)]
+    pub metrics_listen: Option<String>,
+
+    /// CIDR whitelist for the metrics endpoint.
     #[serde(default = "default_metrics_whitelist")]
     pub metrics_whitelist: Vec<IpNetwork>,
 
```
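Per the doc comments above, `metrics_listen` takes precedence over `metrics_port`. A hedged sketch of how the bind address could be resolved; the helper name and the `0.0.0.0` fallback are illustrative assumptions, not the repo's actual code (the real default is described as dual-stack):

```rust
use std::net::SocketAddr;

// Illustrative helper: `metrics_listen` ("IP:PORT") wins over `metrics_port`,
// which binds on all interfaces; neither being set means metrics are disabled.
fn resolve_metrics_addr(
    metrics_listen: Option<&str>,
    metrics_port: Option<u16>,
) -> Option<SocketAddr> {
    if let Some(listen) = metrics_listen {
        return listen.parse().ok();
    }
    metrics_port.map(|port| SocketAddr::from(([0, 0, 0, 0], port)))
}

fn main() {
    assert_eq!(
        resolve_metrics_addr(Some("127.0.0.1:9090"), Some(9091)),
        "127.0.0.1:9090".parse::<SocketAddr>().ok()
    );
    assert_eq!(
        resolve_metrics_addr(None, Some(9090)),
        "0.0.0.0:9090".parse::<SocketAddr>().ok()
    );
    assert_eq!(resolve_metrics_addr(None, None), None);
}
```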
Config types (continued):

```diff
@@ -1156,6 +1175,11 @@ pub struct ServerConfig {
 
     #[serde(default)]
     pub listeners: Vec<ListenerConfig>,
+
+    /// Maximum number of concurrent client connections.
+    /// 0 means unlimited.
+    #[serde(default = "default_server_max_connections")]
+    pub max_connections: u32,
 }
 
 impl Default for ServerConfig {
@@ -1170,9 +1194,11 @@ impl Default for ServerConfig {
     proxy_protocol: false,
     proxy_protocol_header_timeout_ms: default_proxy_protocol_header_timeout_ms(),
     metrics_port: None,
+    metrics_listen: None,
     metrics_whitelist: default_metrics_whitelist(),
     api: ApiConfig::default(),
     listeners: Vec::new(),
+    max_connections: default_server_max_connections(),
     }
 }
 }
@@ -1317,6 +1343,11 @@ pub struct AccessConfig {
     #[serde(default)]
     pub user_max_unique_ips: HashMap<String, usize>,
 
+    /// Global per-user unique IP limit applied when a user has no individual override.
+    /// `0` disables the inherited limit.
+    #[serde(default = "default_user_max_unique_ips_global_each")]
+    pub user_max_unique_ips_global_each: usize,
+
     #[serde(default)]
     pub user_max_unique_ips_mode: UserMaxUniqueIpsMode,
 
@@ -1342,6 +1373,7 @@ impl Default for AccessConfig {
     user_expirations: HashMap::new(),
     user_data_quota: HashMap::new(),
     user_max_unique_ips: HashMap::new(),
+    user_max_unique_ips_global_each: default_user_max_unique_ips_global_each(),
     user_max_unique_ips_mode: UserMaxUniqueIpsMode::default(),
     user_max_unique_ips_window_secs: default_user_max_unique_ips_window_secs(),
     replay_check_len: default_replay_check_len(),
```
IP tracker: a default (global) limit is threaded through the tracker and its tests:

```diff
@@ -17,6 +17,7 @@ pub struct UserIpTracker {
     active_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, usize>>>>,
     recent_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, Instant>>>>,
     max_ips: Arc<RwLock<HashMap<String, usize>>>,
+    default_max_ips: Arc<RwLock<usize>>,
     limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>,
     limit_window: Arc<RwLock<Duration>>,
     last_compact_epoch_secs: Arc<AtomicU64>,
@@ -28,6 +29,7 @@ impl UserIpTracker {
     active_ips: Arc::new(RwLock::new(HashMap::new())),
     recent_ips: Arc::new(RwLock::new(HashMap::new())),
     max_ips: Arc::new(RwLock::new(HashMap::new())),
+    default_max_ips: Arc::new(RwLock::new(0)),
     limit_mode: Arc::new(RwLock::new(UserMaxUniqueIpsMode::ActiveWindow)),
     limit_window: Arc::new(RwLock::new(Duration::from_secs(30))),
     last_compact_epoch_secs: Arc::new(AtomicU64::new(0)),
@@ -100,7 +102,10 @@ impl UserIpTracker {
     limits.remove(username);
 }
 
-pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
+pub async fn load_limits(&self, default_limit: usize, limits: &HashMap<String, usize>) {
+    let mut default_max_ips = self.default_max_ips.write().await;
+    *default_max_ips = default_limit;
+    drop(default_max_ips);
     let mut max_ips = self.max_ips.write().await;
     max_ips.clone_from(limits);
 }
@@ -114,9 +119,14 @@ impl UserIpTracker {
 
 pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
     self.maybe_compact_empty_users().await;
+    let default_max_ips = *self.default_max_ips.read().await;
     let limit = {
         let max_ips = self.max_ips.read().await;
-        max_ips.get(username).copied()
+        max_ips
+            .get(username)
+            .copied()
+            .filter(|limit| *limit > 0)
+            .or((default_max_ips > 0).then_some(default_max_ips))
     };
     let mode = *self.limit_mode.read().await;
     let window = *self.limit_window.read().await;
@@ -255,10 +265,16 @@ impl UserIpTracker {
 pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
     let active_ips = self.active_ips.read().await;
     let max_ips = self.max_ips.read().await;
+    let default_max_ips = *self.default_max_ips.read().await;
 
     let mut stats = Vec::new();
     for (username, user_ips) in active_ips.iter() {
-        let limit = max_ips.get(username).copied().unwrap_or(0);
+        let limit = max_ips
+            .get(username)
+            .copied()
+            .filter(|limit| *limit > 0)
+            .or((default_max_ips > 0).then_some(default_max_ips))
+            .unwrap_or(0);
         stats.push((username.clone(), user_ips.len(), limit));
     }
 
@@ -293,8 +309,13 @@ impl UserIpTracker {
 }
 
 pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
+    let default_max_ips = *self.default_max_ips.read().await;
     let max_ips = self.max_ips.read().await;
-    max_ips.get(username).copied()
+    max_ips
+        .get(username)
+        .copied()
+        .filter(|limit| *limit > 0)
+        .or((default_max_ips > 0).then_some(default_max_ips))
 }
 
 pub async fn format_stats(&self) -> String {
@@ -546,7 +567,7 @@ mod tests {
     config_limits.insert("user1".to_string(), 5);
     config_limits.insert("user2".to_string(), 3);
 
-    tracker.load_limits(&config_limits).await;
+    tracker.load_limits(0, &config_limits).await;
 
     assert_eq!(tracker.get_user_limit("user1").await, Some(5));
     assert_eq!(tracker.get_user_limit("user2").await, Some(3));
@@ -560,16 +581,46 @@ mod tests {
     let mut first = HashMap::new();
     first.insert("user1".to_string(), 2);
     first.insert("user2".to_string(), 3);
-    tracker.load_limits(&first).await;
+    tracker.load_limits(0, &first).await;
 
     let mut second = HashMap::new();
     second.insert("user2".to_string(), 5);
-    tracker.load_limits(&second).await;
+    tracker.load_limits(0, &second).await;
 
     assert_eq!(tracker.get_user_limit("user1").await, None);
     assert_eq!(tracker.get_user_limit("user2").await, Some(5));
 }
 
+#[tokio::test]
+async fn test_global_each_limit_applies_without_user_override() {
+    let tracker = UserIpTracker::new();
+    tracker.load_limits(2, &HashMap::new()).await;
+
+    let ip1 = test_ipv4(172, 16, 0, 1);
+    let ip2 = test_ipv4(172, 16, 0, 2);
+    let ip3 = test_ipv4(172, 16, 0, 3);
+
+    assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
+    assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
+    assert!(tracker.check_and_add("test_user", ip3).await.is_err());
+    assert_eq!(tracker.get_user_limit("test_user").await, Some(2));
+}
+
+#[tokio::test]
+async fn test_user_override_wins_over_global_each_limit() {
+    let tracker = UserIpTracker::new();
+    let mut limits = HashMap::new();
+    limits.insert("test_user".to_string(), 1);
+    tracker.load_limits(3, &limits).await;
+
+    let ip1 = test_ipv4(172, 17, 0, 1);
+    let ip2 = test_ipv4(172, 17, 0, 2);
+
+    assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
+    assert!(tracker.check_and_add("test_user", ip2).await.is_err());
+    assert_eq!(tracker.get_user_limit("test_user").await, Some(1));
+}
+
 #[tokio::test]
 async fn test_time_window_mode_blocks_recent_ip_churn() {
     let tracker = UserIpTracker::new();
```
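The regression suite below exercises the three limit modes referenced in the tracker (`ActiveWindow`, `TimeWindow`, `Combined`). A brief sketch of what each mode appears to mean, inferred from the test behavior rather than stated anywhere in the diff; the enum here is an illustrative mirror, not the real `crate::config` type:

```rust
// Illustrative mirror of the mode enum; comments summarize behavior
// observed in the tests below, not documented API.
#[allow(dead_code)]
enum UserMaxUniqueIpsMode {
    /// Only currently connected unique IPs count toward the limit.
    ActiveWindow,
    /// Recently seen IPs keep counting until the window (in seconds)
    /// expires, even after they disconnect.
    TimeWindow,
    /// Both active connections and recently disconnected IPs count.
    Combined,
}

fn main() {}
```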
src/ip_tracker_regression_tests.rs (new file, 450 lines added):

```rust
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr};
use std::sync::Arc;
use std::time::Duration;

use crate::config::UserMaxUniqueIpsMode;
use crate::ip_tracker::UserIpTracker;

fn ip_from_idx(idx: u32) -> IpAddr {
    let a = 10u8;
    let b = ((idx / 65_536) % 256) as u8;
    let c = ((idx / 256) % 256) as u8;
    let d = (idx % 256) as u8;
    IpAddr::V4(Ipv4Addr::new(a, b, c, d))
}

#[tokio::test]
async fn active_window_enforces_large_unique_ip_burst() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("burst_user", 64).await;
    tracker
        .set_limit_policy(UserMaxUniqueIpsMode::ActiveWindow, 30)
        .await;

    for idx in 0..64 {
        assert!(tracker.check_and_add("burst_user", ip_from_idx(idx)).await.is_ok());
    }
    assert!(tracker.check_and_add("burst_user", ip_from_idx(9_999)).await.is_err());
    assert_eq!(tracker.get_active_ip_count("burst_user").await, 64);
}

#[tokio::test]
async fn global_limit_applies_across_many_users() {
    let tracker = UserIpTracker::new();
    tracker.load_limits(3, &HashMap::new()).await;

    for user_idx in 0..150u32 {
        let user = format!("u{}", user_idx);
        assert!(tracker.check_and_add(&user, ip_from_idx(user_idx * 10)).await.is_ok());
        assert!(tracker
            .check_and_add(&user, ip_from_idx(user_idx * 10 + 1))
            .await
            .is_ok());
        assert!(tracker
            .check_and_add(&user, ip_from_idx(user_idx * 10 + 2))
            .await
            .is_ok());
        assert!(tracker
            .check_and_add(&user, ip_from_idx(user_idx * 10 + 3))
            .await
            .is_err());
    }

    assert_eq!(tracker.get_stats().await.len(), 150);
}

#[tokio::test]
async fn user_zero_override_falls_back_to_global_limit() {
    let tracker = UserIpTracker::new();
    let mut limits = HashMap::new();
    limits.insert("target".to_string(), 0);
    tracker.load_limits(2, &limits).await;

    assert!(tracker.check_and_add("target", ip_from_idx(1)).await.is_ok());
    assert!(tracker.check_and_add("target", ip_from_idx(2)).await.is_ok());
    assert!(tracker.check_and_add("target", ip_from_idx(3)).await.is_err());
    assert_eq!(tracker.get_user_limit("target").await, Some(2));
}

#[tokio::test]
async fn remove_ip_is_idempotent_after_counter_reaches_zero() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("u", 2).await;
    let ip = ip_from_idx(42);

    tracker.check_and_add("u", ip).await.unwrap();
    tracker.remove_ip("u", ip).await;
    tracker.remove_ip("u", ip).await;
    tracker.remove_ip("u", ip).await;

    assert_eq!(tracker.get_active_ip_count("u").await, 0);
    assert!(!tracker.is_ip_active("u", ip).await);
}

#[tokio::test]
async fn clear_user_ips_resets_active_and_recent() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("u", 10).await;

    for idx in 0..6 {
        tracker.check_and_add("u", ip_from_idx(idx)).await.unwrap();
    }

    tracker.clear_user_ips("u").await;

    assert_eq!(tracker.get_active_ip_count("u").await, 0);
    let counts = tracker
        .get_recent_counts_for_users(&["u".to_string()])
        .await;
    assert_eq!(counts.get("u").copied().unwrap_or(0), 0);
}

#[tokio::test]
async fn clear_all_resets_multi_user_state() {
    let tracker = UserIpTracker::new();

    for user_idx in 0..80u32 {
        let user = format!("u{}", user_idx);
        for ip_idx in 0..3 {
            tracker
                .check_and_add(&user, ip_from_idx(user_idx * 100 + ip_idx))
                .await
                .unwrap();
        }
    }

    tracker.clear_all().await;

    assert!(tracker.get_stats().await.is_empty());
    let users = (0..80u32)
        .map(|idx| format!("u{}", idx))
        .collect::<Vec<_>>();
    let recent = tracker.get_recent_counts_for_users(&users).await;
    assert!(recent.values().all(|count| *count == 0));
}

#[tokio::test]
async fn get_active_ips_for_users_are_sorted() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("user", 10).await;

    tracker
        .check_and_add("user", IpAddr::V4(Ipv4Addr::new(10, 0, 0, 9)))
        .await
        .unwrap();
    tracker
        .check_and_add("user", IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)))
        .await
        .unwrap();
    tracker
        .check_and_add("user", IpAddr::V4(Ipv4Addr::new(10, 0, 0, 5)))
        .await
        .unwrap();

    let map = tracker
        .get_active_ips_for_users(&["user".to_string()])
        .await;
    let ips = map.get("user").cloned().unwrap_or_default();

    assert_eq!(
        ips,
        vec![
            IpAddr::V4(Ipv4Addr::new(10, 0, 0, 1)),
            IpAddr::V4(Ipv4Addr::new(10, 0, 0, 5)),
            IpAddr::V4(Ipv4Addr::new(10, 0, 0, 9)),
        ]
    );
}

#[tokio::test]
async fn get_recent_ips_for_users_are_sorted() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("user", 10).await;

    tracker
        .check_and_add("user", IpAddr::V4(Ipv4Addr::new(10, 1, 0, 9)))
        .await
        .unwrap();
    tracker
        .check_and_add("user", IpAddr::V4(Ipv4Addr::new(10, 1, 0, 1)))
        .await
        .unwrap();
    tracker
        .check_and_add("user", IpAddr::V4(Ipv4Addr::new(10, 1, 0, 5)))
        .await
        .unwrap();

    let map = tracker
        .get_recent_ips_for_users(&["user".to_string()])
        .await;
    let ips = map.get("user").cloned().unwrap_or_default();

    assert_eq!(
        ips,
        vec![
            IpAddr::V4(Ipv4Addr::new(10, 1, 0, 1)),
            IpAddr::V4(Ipv4Addr::new(10, 1, 0, 5)),
            IpAddr::V4(Ipv4Addr::new(10, 1, 0, 9)),
        ]
    );
}

#[tokio::test]
async fn time_window_expires_for_large_rotation() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("tw", 1).await;
    tracker
        .set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 1)
        .await;

    tracker.check_and_add("tw", ip_from_idx(1)).await.unwrap();
    tracker.remove_ip("tw", ip_from_idx(1)).await;
    assert!(tracker.check_and_add("tw", ip_from_idx(2)).await.is_err());

    tokio::time::sleep(Duration::from_millis(1_100)).await;
    assert!(tracker.check_and_add("tw", ip_from_idx(2)).await.is_ok());
}

#[tokio::test]
async fn combined_mode_blocks_recent_after_disconnect() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("cmb", 1).await;
    tracker
        .set_limit_policy(UserMaxUniqueIpsMode::Combined, 2)
        .await;

    tracker.check_and_add("cmb", ip_from_idx(11)).await.unwrap();
    tracker.remove_ip("cmb", ip_from_idx(11)).await;

    assert!(tracker.check_and_add("cmb", ip_from_idx(12)).await.is_err());
}

#[tokio::test]
async fn load_limits_replaces_large_limit_map() {
    let tracker = UserIpTracker::new();
    let mut first = HashMap::new();
    let mut second = HashMap::new();

    for idx in 0..300usize {
        first.insert(format!("u{}", idx), 2usize);
    }
    for idx in 150..450usize {
        second.insert(format!("u{}", idx), 4usize);
    }

    tracker.load_limits(0, &first).await;
    tracker.load_limits(0, &second).await;

    assert_eq!(tracker.get_user_limit("u20").await, None);
    assert_eq!(tracker.get_user_limit("u200").await, Some(4));
    assert_eq!(tracker.get_user_limit("u420").await, Some(4));
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn concurrent_same_user_unique_ip_pressure_stays_bounded() {
    let tracker = Arc::new(UserIpTracker::new());
    tracker.set_user_limit("hot", 32).await;
    tracker
        .set_limit_policy(UserMaxUniqueIpsMode::ActiveWindow, 30)
        .await;

    let mut handles = Vec::new();
    for worker in 0..16u32 {
        let tracker_cloned = tracker.clone();
        handles.push(tokio::spawn(async move {
            let base = worker * 200;
            for step in 0..200u32 {
                let _ = tracker_cloned
                    .check_and_add("hot", ip_from_idx(base + step))
                    .await;
            }
        }));
    }

    for handle in handles {
        handle.await.unwrap();
    }

    assert!(tracker.get_active_ip_count("hot").await <= 32);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn concurrent_many_users_isolate_limits() {
    let tracker = Arc::new(UserIpTracker::new());
    tracker.load_limits(4, &HashMap::new()).await;

    let mut handles = Vec::new();
    for user_idx in 0..120u32 {
        let tracker_cloned = tracker.clone();
        handles.push(tokio::spawn(async move {
            let user = format!("u{}", user_idx);
            for ip_idx in 0..10u32 {
                let _ = tracker_cloned
                    .check_and_add(&user, ip_from_idx(user_idx * 1_000 + ip_idx))
                    .await;
            }
        }));
    }

    for handle in handles {
        handle.await.unwrap();
    }

    let stats = tracker.get_stats().await;
    assert_eq!(stats.len(), 120);
    assert!(stats.iter().all(|(_, active, limit)| *active <= 4 && *limit == 4));
}

#[tokio::test]
async fn same_ip_reconnect_high_frequency_keeps_single_unique() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("same", 2).await;
    let ip = ip_from_idx(9);

    for _ in 0..2_000 {
        tracker.check_and_add("same", ip).await.unwrap();
    }

    assert_eq!(tracker.get_active_ip_count("same").await, 1);
    assert!(tracker.is_ip_active("same", ip).await);
}

#[tokio::test]
async fn format_stats_contains_expected_limited_and_unlimited_markers() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("limited", 2).await;
    tracker.check_and_add("limited", ip_from_idx(1)).await.unwrap();
    tracker.check_and_add("open", ip_from_idx(2)).await.unwrap();

    let text = tracker.format_stats().await;

    assert!(text.contains("limited"));
    assert!(text.contains("open"));
    assert!(text.contains("unlimited"));
}

#[tokio::test]
async fn stats_report_global_default_for_users_without_override() {
    let tracker = UserIpTracker::new();
    tracker.load_limits(5, &HashMap::new()).await;

    tracker.check_and_add("a", ip_from_idx(1)).await.unwrap();
    tracker.check_and_add("b", ip_from_idx(2)).await.unwrap();

    let stats = tracker.get_stats().await;
    assert!(stats.iter().any(|(user, _, limit)| user == "a" && *limit == 5));
    assert!(stats.iter().any(|(user, _, limit)| user == "b" && *limit == 5));
}

#[tokio::test]
async fn stress_cycle_add_remove_clear_preserves_empty_end_state() {
    let tracker = UserIpTracker::new();

    for cycle in 0..50u32 {
        let user = format!("cycle{}", cycle);
        tracker.set_user_limit(&user, 128).await;

        for ip_idx in 0..128u32 {
            tracker
                .check_and_add(&user, ip_from_idx(cycle * 10_000 + ip_idx))
                .await
                .unwrap();
        }

        for ip_idx in 0..128u32 {
            tracker
                .remove_ip(&user, ip_from_idx(cycle * 10_000 + ip_idx))
                .await;
        }

        tracker.clear_user_ips(&user).await;
    }

    assert!(tracker.get_stats().await.is_empty());
}

#[tokio::test]
async fn remove_unknown_user_or_ip_does_not_corrupt_state() {
    let tracker = UserIpTracker::new();

    tracker.remove_ip("no_user", ip_from_idx(1)).await;
    tracker.check_and_add("x", ip_from_idx(2)).await.unwrap();
    tracker.remove_ip("x", ip_from_idx(3)).await;

    assert_eq!(tracker.get_active_ip_count("x").await, 1);
    assert!(tracker.is_ip_active("x", ip_from_idx(2)).await);
}

#[tokio::test]
async fn active_and_recent_views_match_after_mixed_workload() {
    let tracker = UserIpTracker::new();
    tracker.set_user_limit("mix", 16).await;

    for ip_idx in 0..12u32 {
        tracker.check_and_add("mix", ip_from_idx(ip_idx)).await.unwrap();
    }
    for ip_idx in 0..6u32 {
        tracker.remove_ip("mix", ip_from_idx(ip_idx)).await;
    }

    let active = tracker
        .get_active_ips_for_users(&["mix".to_string()])
        .await
        .get("mix")
        .cloned()
        .unwrap_or_default();
    let recent_count = tracker
        .get_recent_counts_for_users(&["mix".to_string()])
        .await
        .get("mix")
        .copied()
        .unwrap_or(0);

    assert_eq!(active.len(), 6);
    assert!(recent_count >= active.len());
    assert!(recent_count <= 12);
}

#[tokio::test]
async fn global_limit_switch_updates_enforcement_immediately() {
    let tracker = UserIpTracker::new();
    tracker.load_limits(2, &HashMap::new()).await;

    assert!(tracker.check_and_add("u", ip_from_idx(1)).await.is_ok());
    assert!(tracker.check_and_add("u", ip_from_idx(2)).await.is_ok());
    assert!(tracker.check_and_add("u", ip_from_idx(3)).await.is_err());

    tracker.clear_user_ips("u").await;
    tracker.load_limits(4, &HashMap::new()).await;

    assert!(tracker.check_and_add("u", ip_from_idx(1)).await.is_ok());
    assert!(tracker.check_and_add("u", ip_from_idx(2)).await.is_ok());
    assert!(tracker.check_and_add("u", ip_from_idx(3)).await.is_ok());
    assert!(tracker.check_and_add("u", ip_from_idx(4)).await.is_ok());
    assert!(tracker.check_and_add("u", ip_from_idx(5)).await.is_err());
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
async fn concurrent_reconnect_and_disconnect_preserves_non_negative_counts() {
    let tracker = Arc::new(UserIpTracker::new());
    tracker.set_user_limit("cc", 8).await;

    let mut handles = Vec::new();
    for worker in 0..8u32 {
        let tracker_cloned = tracker.clone();
        handles.push(tokio::spawn(async move {
            let ip = ip_from_idx(50 + worker);
            for _ in 0..500u32 {
                let _ = tracker_cloned.check_and_add("cc", ip).await;
                tracker_cloned.remove_ip("cc", ip).await;
            }
        }));
    }

    for handle in handles {
        handle.await.unwrap();
    }

    assert!(tracker.get_active_ip_count("cc").await <= 8);
}
```
CLI parsing: a `--data-path` option is added:

```diff
@@ -1,4 +1,5 @@
 use std::time::Duration;
+use std::path::PathBuf;
 
 use tokio::sync::watch;
 use tracing::{debug, error, info, warn};
@@ -9,8 +10,9 @@ use crate::transport::middle_proxy::{
 ProxyConfigData, fetch_proxy_config_with_raw, load_proxy_config_cache, save_proxy_config_cache,
 };
 
-pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
+pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
     let mut config_path = "config.toml".to_string();
+    let mut data_path: Option<PathBuf> = None;
     let mut silent = false;
     let mut log_level: Option<String> = None;
 
@@ -28,6 +30,18 @@ pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
     let mut i = 0;
     while i < args.len() {
         match args[i].as_str() {
+            "--data-path" => {
+                i += 1;
+                if i < args.len() {
+                    data_path = Some(PathBuf::from(args[i].clone()));
+                } else {
+                    eprintln!("Missing value for --data-path");
+                    std::process::exit(0);
+                }
+            }
+            s if s.starts_with("--data-path=") => {
+                data_path = Some(PathBuf::from(s.trim_start_matches("--data-path=").to_string()));
+            }
             "--silent" | "-s" => {
                 silent = true;
             }
@@ -44,6 +58,7 @@ pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
     eprintln!("Usage: telemt [config.toml] [OPTIONS]");
     eprintln!();
     eprintln!("Options:");
+    eprintln!("  --data-path <DIR>    Set data directory (absolute path; overrides config value)");
     eprintln!("  --silent, -s         Suppress info logs");
     eprintln!("  --log-level <LEVEL>  debug|verbose|normal|silent");
     eprintln!("  --help, -h           Show this help");
@@ -78,7 +93,7 @@ pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
         i += 1;
     }
 
-    (config_path, silent, log_level)
+    (config_path, data_path, silent, log_level)
 }
 
 pub(crate) fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
```
ME pool initialization: the drain threshold is passed through:

```diff
@@ -237,6 +237,7 @@ pub(crate) async fn initialize_me_pool(
     config.general.me_adaptive_floor_max_warm_writers_global,
     config.general.hardswap,
     config.general.me_pool_drain_ttl_secs,
+    config.general.me_pool_drain_threshold,
     config.general.effective_me_pool_force_close_secs(),
     config.general.me_pool_min_fresh_ratio,
     config.general.me_hardswap_warmup_delay_min_ms,
```
@@ -58,7 +58,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||||||
startup_tracker
|
startup_tracker
|
||||||
.start_component(COMPONENT_CONFIG_LOAD, Some("load and validate config".to_string()))
|
.start_component(COMPONENT_CONFIG_LOAD, Some("load and validate config".to_string()))
|
||||||
.await;
|
.await;
|
||||||
let (config_path, cli_silent, cli_log_level) = parse_cli();
|
let (config_path, data_path, cli_silent, cli_log_level) = parse_cli();
|
||||||
|
|
||||||
let mut config = match ProxyConfig::load(&config_path) {
|
let mut config = match ProxyConfig::load(&config_path) {
|
||||||
Ok(c) => c,
|
Ok(c) => c,
|
||||||
@@ -80,6 +80,34 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
         std::process::exit(1);
     }

+    if let Some(p) = data_path {
+        config.general.data_path = Some(p);
+    }
+
+    if let Some(ref data_path) = config.general.data_path {
+        if !data_path.is_absolute() {
+            eprintln!("[telemt] data_path must be absolute: {}", data_path.display());
+            std::process::exit(1);
+        }
+
+        if data_path.exists() {
+            if !data_path.is_dir() {
+                eprintln!("[telemt] data_path exists but is not a directory: {}", data_path.display());
+                std::process::exit(1);
+            }
+        } else {
+            if let Err(e) = std::fs::create_dir_all(data_path) {
+                eprintln!("[telemt] Can't create data_path {}: {}", data_path.display(), e);
+                std::process::exit(1);
+            }
+        }
+
+        if let Err(e) = std::env::set_current_dir(data_path) {
+            eprintln!("[telemt] Can't use data_path {}: {}", data_path.display(), e);
+            std::process::exit(1);
+        }
+    }
+
     if let Err(e) = crate::network::dns_overrides::install_entries(&config.network.dns_overrides) {
         eprintln!("[telemt] Invalid network.dns_overrides: {}", e);
         std::process::exit(1);
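Note: the hunk above adds startup-time `data_path` handling; the path (taken from the CLI when present) must be absolute, must be a directory (created if missing), and becomes the process working directory before any state files are touched. A minimal standalone sketch of the same validate-then-chdir pattern; the function name `enter_data_dir` is illustrative, not telemt's API:

```rust
use std::io::{Error, ErrorKind};
use std::path::Path;

// Illustrative only: validate a data directory, then make it the process CWD
// so that all relative paths resolve under it.
fn enter_data_dir(dir: &Path) -> std::io::Result<()> {
    if !dir.is_absolute() {
        return Err(Error::new(ErrorKind::InvalidInput, "data_path must be absolute"));
    }
    if dir.exists() && !dir.is_dir() {
        return Err(Error::new(ErrorKind::Other, "data_path exists but is not a directory"));
    }
    std::fs::create_dir_all(dir)?; // no-op when the directory already exists
    std::env::set_current_dir(dir) // relative paths now resolve under dir
}
```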
@@ -168,17 +196,24 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
         stats.clone(),
     ));
     let ip_tracker = Arc::new(UserIpTracker::new());
-    ip_tracker.load_limits(&config.access.user_max_unique_ips).await;
+    ip_tracker
+        .load_limits(
+            config.access.user_max_unique_ips_global_each,
+            &config.access.user_max_unique_ips,
+        )
+        .await;
     ip_tracker
         .set_limit_policy(
             config.access.user_max_unique_ips_mode,
             config.access.user_max_unique_ips_window_secs,
         )
         .await;
-    if !config.access.user_max_unique_ips.is_empty() {
+    if config.access.user_max_unique_ips_global_each > 0 || !config.access.user_max_unique_ips.is_empty()
+    {
         info!(
-            "IP limits configured for {} users",
-            config.access.user_max_unique_ips.len()
+            global_each_limit = config.access.user_max_unique_ips_global_each,
+            explicit_user_limits = config.access.user_max_unique_ips.len(),
+            "User unique IP limits configured"
         );
     }
     if !config.network.dns_overrides.is_empty() {
@@ -314,8 +349,13 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
     let beobachten = Arc::new(BeobachtenStore::new());
     let rng = Arc::new(SecureRandom::new());

-    // Connection concurrency limit
-    let max_connections = Arc::new(Semaphore::new(10_000));
+    // Connection concurrency limit (0 = unlimited)
+    let max_connections_limit = if config.server.max_connections == 0 {
+        Semaphore::MAX_PERMITS
+    } else {
+        config.server.max_connections as usize
+    };
+    let max_connections = Arc::new(Semaphore::new(max_connections_limit));

     let me2dc_fallback = config.general.me2dc_fallback;
     let me_init_retry_attempts = config.general.me_init_retry_attempts;
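Note: the hunk above replaces the hard-coded 10_000-permit cap with a configurable one, where `max_connections = 0` means unlimited. A small self-contained sketch of the same Tokio pattern (the helper name `connection_gate` is an assumption for illustration):

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

// Illustrative only: map a "0 = unlimited" config value onto a semaphore.
fn connection_gate(configured_max: u64) -> Arc<Semaphore> {
    let permits = if configured_max == 0 {
        Semaphore::MAX_PERMITS // effectively unlimited
    } else {
        configured_max as usize
    };
    Arc::new(Semaphore::new(permits))
}

#[tokio::main]
async fn main() {
    let gate = connection_gate(0);
    let _permit = gate.acquire().await.expect("semaphore closed");
    // ... handle one connection while holding the permit ...
}
```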
@@ -131,6 +131,10 @@ pub(crate) async fn spawn_runtime_tasks(
     let mut config_rx_ip_limits = config_rx.clone();
     tokio::spawn(async move {
         let mut prev_limits = config_rx_ip_limits.borrow().access.user_max_unique_ips.clone();
+        let mut prev_global_each = config_rx_ip_limits
+            .borrow()
+            .access
+            .user_max_unique_ips_global_each;
         let mut prev_mode = config_rx_ip_limits.borrow().access.user_max_unique_ips_mode;
         let mut prev_window = config_rx_ip_limits
             .borrow()
@@ -143,9 +147,17 @@ pub(crate) async fn spawn_runtime_tasks(
             }
             let cfg = config_rx_ip_limits.borrow_and_update().clone();

-            if prev_limits != cfg.access.user_max_unique_ips {
-                ip_tracker_policy.load_limits(&cfg.access.user_max_unique_ips).await;
+            if prev_limits != cfg.access.user_max_unique_ips
+                || prev_global_each != cfg.access.user_max_unique_ips_global_each
+            {
+                ip_tracker_policy
+                    .load_limits(
+                        cfg.access.user_max_unique_ips_global_each,
+                        &cfg.access.user_max_unique_ips,
+                    )
+                    .await;
                 prev_limits = cfg.access.user_max_unique_ips.clone();
+                prev_global_each = cfg.access.user_max_unique_ips_global_each;
             }

             if prev_mode != cfg.access.user_max_unique_ips_mode
@@ -267,11 +279,32 @@ pub(crate) async fn spawn_metrics_if_configured(
     ip_tracker: Arc<UserIpTracker>,
     config_rx: watch::Receiver<Arc<ProxyConfig>>,
 ) {
-    if let Some(port) = config.server.metrics_port {
+    // metrics_listen takes precedence; fall back to metrics_port for backward compat.
+    let metrics_target: Option<(u16, Option<String>)> =
+        if let Some(ref listen) = config.server.metrics_listen {
+            match listen.parse::<std::net::SocketAddr>() {
+                Ok(addr) => Some((addr.port(), Some(listen.clone()))),
+                Err(e) => {
+                    startup_tracker
+                        .skip_component(
+                            COMPONENT_METRICS_START,
+                            Some(format!("invalid metrics_listen \"{}\": {}", listen, e)),
+                        )
+                        .await;
+                    None
+                }
+            }
+        } else {
+            config.server.metrics_port.map(|p| (p, None))
+        };
+
+    if let Some((port, listen)) = metrics_target {
+        let fallback_label = format!("port {}", port);
+        let label = listen.as_deref().unwrap_or(&fallback_label);
         startup_tracker
             .start_component(
                 COMPONENT_METRICS_START,
-                Some(format!("spawn metrics endpoint on {}", port)),
+                Some(format!("spawn metrics endpoint on {}", label)),
             )
             .await;
         let stats = stats.clone();
@@ -282,6 +315,7 @@ pub(crate) async fn spawn_metrics_if_configured(
         tokio::spawn(async move {
             metrics::serve(
                 port,
+                listen,
                 stats,
                 beobachten,
                 ip_tracker_metrics,
@@ -296,7 +330,7 @@ pub(crate) async fn spawn_metrics_if_configured(
                 Some("metrics task spawned".to_string()),
             )
             .await;
-    } else {
+    } else if config.server.metrics_listen.is_none() {
         startup_tracker
             .skip_component(
                 COMPONENT_METRICS_START,
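Note: taken together, the three hunks above make `metrics_listen` the preferred setting and keep `metrics_port` as a backward-compatible fallback; an unparseable `metrics_listen` skips the metrics component (reported via the startup tracker) rather than failing startup. The precedence rule in isolation, as a hedged sketch that collapses the error reporting down to `None`:

```rust
use std::net::SocketAddr;

// Illustrative only: metrics_listen wins when present and valid;
// otherwise fall back to metrics_port; None disables metrics.
fn resolve_metrics_target(
    listen: Option<&str>,
    port: Option<u16>,
) -> Option<(u16, Option<String>)> {
    match listen {
        Some(l) => l
            .parse::<SocketAddr>()
            .ok()
            .map(|addr| (addr.port(), Some(l.to_string()))),
        None => port.map(|p| (p, None)),
    }
}

fn main() {
    assert_eq!(
        resolve_metrics_target(Some("127.0.0.1:9090"), Some(9100)),
        Some((9090, Some("127.0.0.1:9090".to_string())))
    );
    assert_eq!(resolve_metrics_target(None, Some(9100)), Some((9100, None)));
}
```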
@@ -6,6 +6,8 @@ mod config;
 mod crypto;
 mod error;
 mod ip_tracker;
+#[cfg(test)]
+mod ip_tracker_regression_tests;
 mod maestro;
 mod metrics;
 mod network;
@@ -21,6 +21,7 @@ use crate::transport::{ListenOptions, create_listener};

 pub async fn serve(
     port: u16,
+    listen: Option<String>,
     stats: Arc<Stats>,
     beobachten: Arc<BeobachtenStore>,
     ip_tracker: Arc<UserIpTracker>,
@@ -28,6 +29,33 @@ pub async fn serve(
     whitelist: Vec<IpNetwork>,
 ) {
     let whitelist = Arc::new(whitelist);

+    // If `metrics_listen` is set, bind on that single address only.
+    if let Some(ref listen_addr) = listen {
+        let addr: SocketAddr = match listen_addr.parse() {
+            Ok(a) => a,
+            Err(e) => {
+                warn!(error = %e, "Invalid metrics_listen address: {}", listen_addr);
+                return;
+            }
+        };
+        let is_ipv6 = addr.is_ipv6();
+        match bind_metrics_listener(addr, is_ipv6) {
+            Ok(listener) => {
+                info!("Metrics endpoint: http://{}/metrics and /beobachten", addr);
+                serve_listener(
+                    listener, stats, beobachten, ip_tracker, config_rx, whitelist,
+                )
+                .await;
+            }
+            Err(e) => {
+                warn!(error = %e, "Failed to bind metrics on {}", addr);
+            }
+        }
+        return;
+    }
+
+    // Fallback: bind on 0.0.0.0 and [::] using metrics_port.
     let mut listener_v4 = None;
     let mut listener_v6 = None;

@@ -1774,14 +1802,24 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
         "# HELP telemt_user_unique_ips_recent_window Per-user unique IPs seen in configured observation window"
     );
     let _ = writeln!(out, "# TYPE telemt_user_unique_ips_recent_window gauge");
-    let _ = writeln!(out, "# HELP telemt_user_unique_ips_limit Per-user configured unique IP limit (0 means unlimited)");
+    let _ = writeln!(out, "# HELP telemt_user_unique_ips_limit Effective per-user unique IP limit (0 means unlimited)");
     let _ = writeln!(out, "# TYPE telemt_user_unique_ips_limit gauge");
     let _ = writeln!(out, "# HELP telemt_user_unique_ips_utilization Per-user unique IP usage ratio (0 for unlimited)");
     let _ = writeln!(out, "# TYPE telemt_user_unique_ips_utilization gauge");

     for user in unique_users {
         let current = ip_counts.get(&user).copied().unwrap_or(0);
-        let limit = config.access.user_max_unique_ips.get(&user).copied().unwrap_or(0);
+        let limit = config
+            .access
+            .user_max_unique_ips
+            .get(&user)
+            .copied()
+            .filter(|limit| *limit > 0)
+            .or(
+                (config.access.user_max_unique_ips_global_each > 0)
+                    .then_some(config.access.user_max_unique_ips_global_each),
+            )
+            .unwrap_or(0);
         let utilization = if limit > 0 {
             current as f64 / limit as f64
         } else {
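Note: the limit resolution above means an explicit per-user limit greater than zero wins; otherwise a non-zero `user_max_unique_ips_global_each` applies; otherwise the limit is 0 (unlimited). The same fallback chain in isolation, as an illustrative sketch:

```rust
// Illustrative only: effective unique-IP limit for one user.
fn effective_limit(explicit: Option<u64>, global_each: u64) -> u64 {
    explicit
        .filter(|limit| *limit > 0)
        .or((global_each > 0).then_some(global_each))
        .unwrap_or(0)
}

fn main() {
    assert_eq!(effective_limit(Some(5), 2), 5); // explicit limit wins
    assert_eq!(effective_limit(Some(0), 2), 2); // explicit 0 falls through
    assert_eq!(effective_limit(None, 2), 2);    // global default applies
    assert_eq!(effective_limit(None, 0), 0);    // unlimited
}
```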
@@ -1904,6 +1942,25 @@ mod tests {
         assert!(output.contains("telemt_user_unique_ips_recent_window{user="));
     }

+    #[tokio::test]
+    async fn test_render_uses_global_each_unique_ip_limit() {
+        let stats = Stats::new();
+        stats.increment_user_connects("alice");
+        stats.increment_user_curr_connects("alice");
+        let tracker = UserIpTracker::new();
+        tracker
+            .check_and_add("alice", "203.0.113.10".parse().unwrap())
+            .await
+            .unwrap();
+        let mut config = ProxyConfig::default();
+        config.access.user_max_unique_ips_global_each = 2;
+
+        let output = render_metrics(&stats, &config, &tracker).await;
+
+        assert!(output.contains("telemt_user_unique_ips_limit{user=\"alice\"} 2"));
+        assert!(output.contains("telemt_user_unique_ips_utilization{user=\"alice\"} 0.500000"));
+    }
+
     #[tokio::test]
     async fn test_render_has_type_annotations() {
         let stats = Stats::new();
@@ -298,6 +298,7 @@ async fn run_update_cycle(
     pool.update_runtime_reinit_policy(
         cfg.general.hardswap,
         cfg.general.me_pool_drain_ttl_secs,
+        cfg.general.me_pool_drain_threshold,
         cfg.general.effective_me_pool_force_close_secs(),
         cfg.general.me_pool_min_fresh_ratio,
         cfg.general.me_hardswap_warmup_delay_min_ms,
@@ -524,6 +525,7 @@ pub async fn me_config_updater(
     pool.update_runtime_reinit_policy(
         cfg.general.hardswap,
         cfg.general.me_pool_drain_ttl_secs,
+        cfg.general.me_pool_drain_threshold,
         cfg.general.effective_me_pool_force_close_secs(),
         cfg.general.me_pool_min_fresh_ratio,
         cfg.general.me_hardswap_warmup_delay_min_ms,
@@ -25,6 +25,9 @@ const HEALTH_RECONNECT_BUDGET_PER_CORE: usize = 2;
 const HEALTH_RECONNECT_BUDGET_PER_DC: usize = 1;
 const HEALTH_RECONNECT_BUDGET_MIN: usize = 4;
 const HEALTH_RECONNECT_BUDGET_MAX: usize = 128;
+const HEALTH_DRAIN_CLOSE_BUDGET_PER_CORE: usize = 16;
+const HEALTH_DRAIN_CLOSE_BUDGET_MIN: usize = 16;
+const HEALTH_DRAIN_CLOSE_BUDGET_MAX: usize = 256;

 #[derive(Debug, Clone)]
 struct DcFloorPlanEntry {
@@ -111,23 +114,66 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
         }
     }

-async fn reap_draining_writers(
+pub(super) async fn reap_draining_writers(
     pool: &Arc<MePool>,
     warn_next_allowed: &mut HashMap<u64, Instant>,
 ) {
     let now_epoch_secs = MePool::now_epoch_secs();
     let now = Instant::now();
     let drain_ttl_secs = pool.me_pool_drain_ttl_secs.load(std::sync::atomic::Ordering::Relaxed);
+    let drain_threshold = pool
+        .me_pool_drain_threshold
+        .load(std::sync::atomic::Ordering::Relaxed);
     let writers = pool.writers.read().await.clone();
+    let activity = pool.registry.writer_activity_snapshot().await;
+    let mut draining_writers = Vec::new();
+    let mut empty_writer_ids = Vec::<u64>::new();
+    let mut force_close_writer_ids = Vec::<u64>::new();
     for writer in writers {
         if !writer.draining.load(std::sync::atomic::Ordering::Relaxed) {
             continue;
         }
-        let is_empty = pool.registry.is_writer_empty(writer.id).await;
-        if is_empty {
-            pool.remove_writer_and_close_clients(writer.id).await;
+        if activity
+            .bound_clients_by_writer
+            .get(&writer.id)
+            .copied()
+            .unwrap_or(0)
+            == 0
+        {
+            empty_writer_ids.push(writer.id);
             continue;
         }
+        draining_writers.push(writer);
+    }
+
+    if drain_threshold > 0 && draining_writers.len() > drain_threshold as usize {
+        draining_writers.sort_by(|left, right| {
+            let left_started = left
+                .draining_started_at_epoch_secs
+                .load(std::sync::atomic::Ordering::Relaxed);
+            let right_started = right
+                .draining_started_at_epoch_secs
+                .load(std::sync::atomic::Ordering::Relaxed);
+            left_started
+                .cmp(&right_started)
+                .then_with(|| left.created_at.cmp(&right.created_at))
+                .then_with(|| left.id.cmp(&right.id))
+        });
+        let overflow = draining_writers.len().saturating_sub(drain_threshold as usize);
+        warn!(
+            draining_writers = draining_writers.len(),
+            me_pool_drain_threshold = drain_threshold,
+            removing_writers = overflow,
+            "ME draining writer threshold exceeded, force-closing oldest draining writers"
+        );
+        for writer in draining_writers.drain(..overflow) {
+            force_close_writer_ids.push(writer.id);
+        }
+    }
+
+    let mut active_draining_writer_ids = HashSet::with_capacity(draining_writers.len());
+    for writer in draining_writers {
+        active_draining_writer_ids.insert(writer.id);
         let drain_started_at_epoch_secs = writer
             .draining_started_at_epoch_secs
             .load(std::sync::atomic::Ordering::Relaxed);
@@ -157,10 +203,59 @@ async fn reap_draining_writers(
             .load(std::sync::atomic::Ordering::Relaxed);
         if deadline_epoch_secs != 0 && now_epoch_secs >= deadline_epoch_secs {
             warn!(writer_id = writer.id, "Drain timeout, force-closing");
+            force_close_writer_ids.push(writer.id);
+            active_draining_writer_ids.remove(&writer.id);
+        }
+    }
+
+    warn_next_allowed.retain(|writer_id, _| active_draining_writer_ids.contains(writer_id));
+
+    let close_budget = health_drain_close_budget();
+    let requested_force_close = force_close_writer_ids.len();
+    let requested_empty_close = empty_writer_ids.len();
+    let requested_close_total = requested_force_close.saturating_add(requested_empty_close);
+    let mut closed_writer_ids = HashSet::<u64>::new();
+    let mut closed_total = 0usize;
+    for writer_id in force_close_writer_ids {
+        if closed_total >= close_budget {
+            break;
+        }
+        if !closed_writer_ids.insert(writer_id) {
+            continue;
+        }
         pool.stats.increment_pool_force_close_total();
-        pool.remove_writer_and_close_clients(writer.id).await;
+        pool.remove_writer_and_close_clients(writer_id).await;
+        closed_total = closed_total.saturating_add(1);
     }
+    for writer_id in empty_writer_ids {
+        if closed_total >= close_budget {
+            break;
         }
+        if !closed_writer_ids.insert(writer_id) {
+            continue;
+        }
+        pool.remove_writer_and_close_clients(writer_id).await;
+        closed_total = closed_total.saturating_add(1);
+    }
+
+    let pending_close_total = requested_close_total.saturating_sub(closed_total);
+    if pending_close_total > 0 {
+        warn!(
+            close_budget,
+            closed_total,
+            pending_close_total,
+            "ME draining close backlog deferred to next health cycle"
+        );
+    }
+}
+
+pub(super) fn health_drain_close_budget() -> usize {
+    let cpu_cores = std::thread::available_parallelism()
+        .map(std::num::NonZeroUsize::get)
+        .unwrap_or(1);
+    cpu_cores
+        .saturating_mul(HEALTH_DRAIN_CLOSE_BUDGET_PER_CORE)
+        .clamp(HEALTH_DRAIN_CLOSE_BUDGET_MIN, HEALTH_DRAIN_CLOSE_BUDGET_MAX)
 }

 fn should_emit_writer_warn(
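Note: for a sense of scale, `health_drain_close_budget` is `cores × 16`, clamped to `[16, 256]`: 1 core gives 16 (floor), 8 cores give 128 (linear region), 32 or more cores give 256 (ceiling). A worked check of the clamp, written as a standalone sketch with the constants inlined:

```rust
// Illustrative only: the per-cycle close budget formula used above.
fn budget(cores: usize) -> usize {
    cores.saturating_mul(16).clamp(16, 256)
}

fn main() {
    assert_eq!(budget(1), 16);   // clamped up to the floor
    assert_eq!(budget(8), 128);  // linear region
    assert_eq!(budget(32), 256); // clamped down to the ceiling
}
```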
@@ -1270,3 +1365,190 @@ async fn maybe_rotate_single_endpoint_shadow(
         "Single-endpoint shadow writer rotated"
     );
 }
+
+#[cfg(test)]
+mod tests {
+    use std::collections::HashMap;
+    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+    use std::sync::Arc;
+    use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU32, AtomicU64, Ordering};
+    use std::time::{Duration, Instant};
+
+    use tokio::sync::mpsc;
+    use tokio_util::sync::CancellationToken;
+
+    use super::reap_draining_writers;
+    use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode};
+    use crate::crypto::SecureRandom;
+    use crate::network::probe::NetworkDecision;
+    use crate::stats::Stats;
+    use crate::transport::middle_proxy::codec::WriterCommand;
+    use crate::transport::middle_proxy::pool::{MePool, MeWriter, WriterContour};
+    use crate::transport::middle_proxy::registry::ConnMeta;
+
+    async fn make_pool(me_pool_drain_threshold: u64) -> Arc<MePool> {
+        let general = GeneralConfig {
+            me_pool_drain_threshold,
+            ..GeneralConfig::default()
+        };
+        MePool::new(
+            None,
+            vec![1u8; 32],
+            None,
+            false,
+            None,
+            Vec::new(),
+            1,
+            None,
+            12,
+            1200,
+            HashMap::new(),
+            HashMap::new(),
+            None,
+            NetworkDecision::default(),
+            None,
+            Arc::new(SecureRandom::new()),
+            Arc::new(Stats::default()),
+            general.me_keepalive_enabled,
+            general.me_keepalive_interval_secs,
+            general.me_keepalive_jitter_secs,
+            general.me_keepalive_payload_random,
+            general.rpc_proxy_req_every,
+            general.me_warmup_stagger_enabled,
+            general.me_warmup_step_delay_ms,
+            general.me_warmup_step_jitter_ms,
+            general.me_reconnect_max_concurrent_per_dc,
+            general.me_reconnect_backoff_base_ms,
+            general.me_reconnect_backoff_cap_ms,
+            general.me_reconnect_fast_retry_count,
+            general.me_single_endpoint_shadow_writers,
+            general.me_single_endpoint_outage_mode_enabled,
+            general.me_single_endpoint_outage_disable_quarantine,
+            general.me_single_endpoint_outage_backoff_min_ms,
+            general.me_single_endpoint_outage_backoff_max_ms,
+            general.me_single_endpoint_shadow_rotate_every_secs,
+            general.me_floor_mode,
+            general.me_adaptive_floor_idle_secs,
+            general.me_adaptive_floor_min_writers_single_endpoint,
+            general.me_adaptive_floor_min_writers_multi_endpoint,
+            general.me_adaptive_floor_recover_grace_secs,
+            general.me_adaptive_floor_writers_per_core_total,
+            general.me_adaptive_floor_cpu_cores_override,
+            general.me_adaptive_floor_max_extra_writers_single_per_core,
+            general.me_adaptive_floor_max_extra_writers_multi_per_core,
+            general.me_adaptive_floor_max_active_writers_per_core,
+            general.me_adaptive_floor_max_warm_writers_per_core,
+            general.me_adaptive_floor_max_active_writers_global,
+            general.me_adaptive_floor_max_warm_writers_global,
+            general.hardswap,
+            general.me_pool_drain_ttl_secs,
+            general.me_pool_drain_threshold,
+            general.effective_me_pool_force_close_secs(),
+            general.me_pool_min_fresh_ratio,
+            general.me_hardswap_warmup_delay_min_ms,
+            general.me_hardswap_warmup_delay_max_ms,
+            general.me_hardswap_warmup_extra_passes,
+            general.me_hardswap_warmup_pass_backoff_base_ms,
+            general.me_bind_stale_mode,
+            general.me_bind_stale_ttl_secs,
+            general.me_secret_atomic_snapshot,
+            general.me_deterministic_writer_sort,
+            MeWriterPickMode::default(),
+            general.me_writer_pick_sample_size,
+            MeSocksKdfPolicy::default(),
+            general.me_writer_cmd_channel_capacity,
+            general.me_route_channel_capacity,
+            general.me_route_backpressure_base_timeout_ms,
+            general.me_route_backpressure_high_timeout_ms,
+            general.me_route_backpressure_high_watermark_pct,
+            general.me_reader_route_data_wait_ms,
+            general.me_health_interval_ms_unhealthy,
+            general.me_health_interval_ms_healthy,
+            general.me_warn_rate_limit_ms,
+            MeRouteNoWriterMode::default(),
+            general.me_route_no_writer_wait_ms,
+            general.me_route_inline_recovery_attempts,
+            general.me_route_inline_recovery_wait_ms,
+        )
+    }
+
+    async fn insert_draining_writer(
+        pool: &Arc<MePool>,
+        writer_id: u64,
+        drain_started_at_epoch_secs: u64,
+    ) -> u64 {
+        let (conn_id, _rx) = pool.registry.register().await;
+        let (tx, _writer_rx) = mpsc::channel::<WriterCommand>(8);
+        let writer = MeWriter {
+            id: writer_id,
+            addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4000 + writer_id as u16),
+            source_ip: IpAddr::V4(Ipv4Addr::LOCALHOST),
+            writer_dc: 2,
+            generation: 1,
+            contour: Arc::new(AtomicU8::new(WriterContour::Draining.as_u8())),
+            created_at: Instant::now() - Duration::from_secs(writer_id),
+            tx: tx.clone(),
+            cancel: CancellationToken::new(),
+            degraded: Arc::new(AtomicBool::new(false)),
+            rtt_ema_ms_x10: Arc::new(AtomicU32::new(0)),
+            draining: Arc::new(AtomicBool::new(true)),
+            draining_started_at_epoch_secs: Arc::new(AtomicU64::new(drain_started_at_epoch_secs)),
+            drain_deadline_epoch_secs: Arc::new(AtomicU64::new(0)),
+            allow_drain_fallback: Arc::new(AtomicBool::new(false)),
+        };
+        pool.writers.write().await.push(writer);
+        pool.registry.register_writer(writer_id, tx).await;
+        pool.conn_count.fetch_add(1, Ordering::Relaxed);
+        assert!(
+            pool.registry
+                .bind_writer(
+                    conn_id,
+                    writer_id,
+                    ConnMeta {
+                        target_dc: 2,
+                        client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6000),
+                        our_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443),
+                        proto_flags: 0,
+                    },
+                )
+                .await
+        );
+        conn_id
+    }
+
+    #[tokio::test]
+    async fn reap_draining_writers_force_closes_oldest_over_threshold() {
+        let pool = make_pool(2).await;
+        let now_epoch_secs = MePool::now_epoch_secs();
+        let conn_a = insert_draining_writer(&pool, 10, now_epoch_secs.saturating_sub(30)).await;
+        let conn_b = insert_draining_writer(&pool, 20, now_epoch_secs.saturating_sub(20)).await;
+        let conn_c = insert_draining_writer(&pool, 30, now_epoch_secs.saturating_sub(10)).await;
+        let mut warn_next_allowed = HashMap::new();
+
+        reap_draining_writers(&pool, &mut warn_next_allowed).await;
+
+        let writer_ids: Vec<u64> = pool.writers.read().await.iter().map(|writer| writer.id).collect();
+        assert_eq!(writer_ids, vec![20, 30]);
+        assert!(pool.registry.get_writer(conn_a).await.is_none());
+        assert_eq!(pool.registry.get_writer(conn_b).await.unwrap().writer_id, 20);
+        assert_eq!(pool.registry.get_writer(conn_c).await.unwrap().writer_id, 30);
+    }
+
+    #[tokio::test]
+    async fn reap_draining_writers_keeps_timeout_only_behavior_when_threshold_disabled() {
+        let pool = make_pool(0).await;
+        let now_epoch_secs = MePool::now_epoch_secs();
+        let conn_a = insert_draining_writer(&pool, 10, now_epoch_secs.saturating_sub(30)).await;
+        let conn_b = insert_draining_writer(&pool, 20, now_epoch_secs.saturating_sub(20)).await;
+        let conn_c = insert_draining_writer(&pool, 30, now_epoch_secs.saturating_sub(10)).await;
+        let mut warn_next_allowed = HashMap::new();
+
+        reap_draining_writers(&pool, &mut warn_next_allowed).await;
+
+        let writer_ids: Vec<u64> = pool.writers.read().await.iter().map(|writer| writer.id).collect();
+        assert_eq!(writer_ids, vec![10, 20, 30]);
+        assert_eq!(pool.registry.get_writer(conn_a).await.unwrap().writer_id, 10);
+        assert_eq!(pool.registry.get_writer(conn_b).await.unwrap().writer_id, 20);
+        assert_eq!(pool.registry.get_writer(conn_c).await.unwrap().writer_id, 30);
+    }
+}
src/transport/middle_proxy/health_adversarial_tests.rs (new file, 437 lines)
@@ -0,0 +1,437 @@
+use std::collections::HashMap;
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU32, AtomicU64, Ordering};
+use std::time::{Duration, Instant};
+
+use tokio::sync::mpsc;
+use tokio_util::sync::CancellationToken;
+
+use super::codec::WriterCommand;
+use super::health::{health_drain_close_budget, reap_draining_writers};
+use super::pool::{MePool, MeWriter, WriterContour};
+use super::registry::ConnMeta;
+use super::me_health_monitor;
+use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode};
+use crate::crypto::SecureRandom;
+use crate::network::probe::NetworkDecision;
+use crate::stats::Stats;
+
+async fn make_pool(
+    me_pool_drain_threshold: u64,
+    me_health_interval_ms_unhealthy: u64,
+    me_health_interval_ms_healthy: u64,
+) -> (Arc<MePool>, Arc<SecureRandom>) {
+    let general = GeneralConfig {
+        me_pool_drain_threshold,
+        me_health_interval_ms_unhealthy,
+        me_health_interval_ms_healthy,
+        ..GeneralConfig::default()
+    };
+
+    let rng = Arc::new(SecureRandom::new());
+    let pool = MePool::new(
+        None,
+        vec![1u8; 32],
+        None,
+        false,
+        None,
+        Vec::new(),
+        1,
+        None,
+        12,
+        1200,
+        HashMap::new(),
+        HashMap::new(),
+        None,
+        NetworkDecision::default(),
+        None,
+        rng.clone(),
+        Arc::new(Stats::default()),
+        general.me_keepalive_enabled,
+        general.me_keepalive_interval_secs,
+        general.me_keepalive_jitter_secs,
+        general.me_keepalive_payload_random,
+        general.rpc_proxy_req_every,
+        general.me_warmup_stagger_enabled,
+        general.me_warmup_step_delay_ms,
+        general.me_warmup_step_jitter_ms,
+        general.me_reconnect_max_concurrent_per_dc,
+        general.me_reconnect_backoff_base_ms,
+        general.me_reconnect_backoff_cap_ms,
+        general.me_reconnect_fast_retry_count,
+        general.me_single_endpoint_shadow_writers,
+        general.me_single_endpoint_outage_mode_enabled,
+        general.me_single_endpoint_outage_disable_quarantine,
+        general.me_single_endpoint_outage_backoff_min_ms,
+        general.me_single_endpoint_outage_backoff_max_ms,
+        general.me_single_endpoint_shadow_rotate_every_secs,
+        general.me_floor_mode,
+        general.me_adaptive_floor_idle_secs,
+        general.me_adaptive_floor_min_writers_single_endpoint,
+        general.me_adaptive_floor_min_writers_multi_endpoint,
+        general.me_adaptive_floor_recover_grace_secs,
+        general.me_adaptive_floor_writers_per_core_total,
+        general.me_adaptive_floor_cpu_cores_override,
+        general.me_adaptive_floor_max_extra_writers_single_per_core,
+        general.me_adaptive_floor_max_extra_writers_multi_per_core,
+        general.me_adaptive_floor_max_active_writers_per_core,
+        general.me_adaptive_floor_max_warm_writers_per_core,
+        general.me_adaptive_floor_max_active_writers_global,
+        general.me_adaptive_floor_max_warm_writers_global,
+        general.hardswap,
+        general.me_pool_drain_ttl_secs,
+        general.me_pool_drain_threshold,
+        general.effective_me_pool_force_close_secs(),
+        general.me_pool_min_fresh_ratio,
+        general.me_hardswap_warmup_delay_min_ms,
+        general.me_hardswap_warmup_delay_max_ms,
+        general.me_hardswap_warmup_extra_passes,
+        general.me_hardswap_warmup_pass_backoff_base_ms,
+        general.me_bind_stale_mode,
+        general.me_bind_stale_ttl_secs,
+        general.me_secret_atomic_snapshot,
+        general.me_deterministic_writer_sort,
+        MeWriterPickMode::default(),
+        general.me_writer_pick_sample_size,
+        MeSocksKdfPolicy::default(),
+        general.me_writer_cmd_channel_capacity,
+        general.me_route_channel_capacity,
+        general.me_route_backpressure_base_timeout_ms,
+        general.me_route_backpressure_high_timeout_ms,
+        general.me_route_backpressure_high_watermark_pct,
+        general.me_reader_route_data_wait_ms,
+        general.me_health_interval_ms_unhealthy,
+        general.me_health_interval_ms_healthy,
+        general.me_warn_rate_limit_ms,
+        MeRouteNoWriterMode::default(),
+        general.me_route_no_writer_wait_ms,
+        general.me_route_inline_recovery_attempts,
+        general.me_route_inline_recovery_wait_ms,
+    );
+
+    (pool, rng)
+}
+
+async fn insert_draining_writer(
+    pool: &Arc<MePool>,
+    writer_id: u64,
+    drain_started_at_epoch_secs: u64,
+    bound_clients: usize,
+    drain_deadline_epoch_secs: u64,
+) {
+    let (tx, _writer_rx) = mpsc::channel::<WriterCommand>(8);
+    let writer = MeWriter {
+        id: writer_id,
+        addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6000 + writer_id as u16),
+        source_ip: IpAddr::V4(Ipv4Addr::LOCALHOST),
+        writer_dc: 2,
+        generation: 1,
+        contour: Arc::new(AtomicU8::new(WriterContour::Draining.as_u8())),
+        created_at: Instant::now() - Duration::from_secs(writer_id),
+        tx: tx.clone(),
+        cancel: CancellationToken::new(),
+        degraded: Arc::new(AtomicBool::new(false)),
+        rtt_ema_ms_x10: Arc::new(AtomicU32::new(0)),
+        draining: Arc::new(AtomicBool::new(true)),
+        draining_started_at_epoch_secs: Arc::new(AtomicU64::new(drain_started_at_epoch_secs)),
+        drain_deadline_epoch_secs: Arc::new(AtomicU64::new(drain_deadline_epoch_secs)),
+        allow_drain_fallback: Arc::new(AtomicBool::new(false)),
+    };
+
+    pool.writers.write().await.push(writer);
+    pool.registry.register_writer(writer_id, tx).await;
+    pool.conn_count.fetch_add(1, Ordering::Relaxed);
+
+    for idx in 0..bound_clients {
+        let (conn_id, _rx) = pool.registry.register().await;
+        assert!(
+            pool.registry
+                .bind_writer(
+                    conn_id,
+                    writer_id,
+                    ConnMeta {
+                        target_dc: 2,
+                        client_addr: SocketAddr::new(
+                            IpAddr::V4(Ipv4Addr::LOCALHOST),
+                            8000 + idx as u16,
+                        ),
+                        our_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443),
+                        proto_flags: 0,
+                    },
+                )
+                .await
+        );
+    }
+}
+
+async fn writer_count(pool: &Arc<MePool>) -> usize {
+    pool.writers.read().await.len()
+}
+
+async fn sorted_writer_ids(pool: &Arc<MePool>) -> Vec<u64> {
+    let mut ids = pool
+        .writers
+        .read()
+        .await
+        .iter()
+        .map(|writer| writer.id)
+        .collect::<Vec<_>>();
+    ids.sort_unstable();
+    ids
+}
+
+#[tokio::test]
+async fn reap_draining_writers_clears_warn_state_when_pool_empty() {
+    let (pool, _rng) = make_pool(128, 1, 1).await;
+    let mut warn_next_allowed = HashMap::new();
+    warn_next_allowed.insert(11, Instant::now() + Duration::from_secs(5));
+    warn_next_allowed.insert(22, Instant::now() + Duration::from_secs(5));
+
+    reap_draining_writers(&pool, &mut warn_next_allowed).await;
+
+    assert!(warn_next_allowed.is_empty());
+}
+
+#[tokio::test]
+async fn reap_draining_writers_respects_threshold_across_multiple_overflow_cycles() {
+    let threshold = 3u64;
+    let (pool, _rng) = make_pool(threshold, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+
+    for writer_id in 1..=60u64 {
+        insert_draining_writer(
+            &pool,
+            writer_id,
+            now_epoch_secs.saturating_sub(600).saturating_add(writer_id),
+            1,
+            0,
+        )
+        .await;
+    }
+
+    let mut warn_next_allowed = HashMap::new();
+    for _ in 0..64 {
+        reap_draining_writers(&pool, &mut warn_next_allowed).await;
+        if writer_count(&pool).await <= threshold as usize {
+            break;
+        }
+    }
+
+    assert_eq!(writer_count(&pool).await, threshold as usize);
+    assert_eq!(sorted_writer_ids(&pool).await, vec![58, 59, 60]);
+}
+
+#[tokio::test]
+async fn reap_draining_writers_handles_large_empty_writer_population() {
+    let (pool, _rng) = make_pool(128, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+    let total = health_drain_close_budget().saturating_mul(3).saturating_add(27);
+
+    for writer_id in 1..=total as u64 {
+        insert_draining_writer(
+            &pool,
+            writer_id,
+            now_epoch_secs.saturating_sub(120),
+            0,
+            0,
+        )
+        .await;
+    }
+
+    let mut warn_next_allowed = HashMap::new();
+    for _ in 0..24 {
+        if writer_count(&pool).await == 0 {
+            break;
+        }
+        reap_draining_writers(&pool, &mut warn_next_allowed).await;
+    }
+
+    assert_eq!(writer_count(&pool).await, 0);
+}
+
+#[tokio::test]
+async fn reap_draining_writers_processes_mass_deadline_expiry_without_unbounded_growth() {
+    let (pool, _rng) = make_pool(128, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+    let total = health_drain_close_budget().saturating_mul(4).saturating_add(31);
+
+    for writer_id in 1..=total as u64 {
+        insert_draining_writer(
+            &pool,
+            writer_id,
+            now_epoch_secs.saturating_sub(180),
+            1,
+            now_epoch_secs.saturating_sub(1),
+        )
+        .await;
+    }
+
+    let mut warn_next_allowed = HashMap::new();
+    for _ in 0..40 {
+        if writer_count(&pool).await == 0 {
+            break;
+        }
+        reap_draining_writers(&pool, &mut warn_next_allowed).await;
+    }
+
+    assert_eq!(writer_count(&pool).await, 0);
+}
+
+#[tokio::test]
+async fn reap_draining_writers_maintains_warn_state_subset_property_under_bulk_churn() {
+    let (pool, _rng) = make_pool(128, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+    let mut warn_next_allowed = HashMap::new();
+
+    for wave in 0..40u64 {
+        for offset in 0..8u64 {
+            insert_draining_writer(
+                &pool,
+                wave * 100 + offset,
+                now_epoch_secs.saturating_sub(400 + offset),
+                1,
+                0,
+            )
+            .await;
+        }
+
+        reap_draining_writers(&pool, &mut warn_next_allowed).await;
+        assert!(warn_next_allowed.len() <= writer_count(&pool).await);
+
+        let ids = sorted_writer_ids(&pool).await;
+        for writer_id in ids.into_iter().take(3) {
+            let _ = pool.remove_writer_and_close_clients(writer_id).await;
+        }
+
+        reap_draining_writers(&pool, &mut warn_next_allowed).await;
+        assert!(warn_next_allowed.len() <= writer_count(&pool).await);
+    }
+}
+
+#[tokio::test]
+async fn reap_draining_writers_budgeted_cleanup_never_increases_pool_size() {
+    let (pool, _rng) = make_pool(5, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+
+    for writer_id in 1..=200u64 {
+        insert_draining_writer(
+            &pool,
+            writer_id,
+            now_epoch_secs.saturating_sub(240).saturating_add(writer_id),
+            1,
+            0,
+        )
+        .await;
+    }
+
+    let mut warn_next_allowed = HashMap::new();
+    let mut previous = writer_count(&pool).await;
+    for _ in 0..32 {
+        reap_draining_writers(&pool, &mut warn_next_allowed).await;
+        let current = writer_count(&pool).await;
+        assert!(current <= previous);
+        previous = current;
+    }
+}
+
+#[tokio::test]
+async fn me_health_monitor_converges_to_threshold_under_live_injection_churn() {
+    let threshold = 7u64;
+    let (pool, rng) = make_pool(threshold, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+
+    for writer_id in 1..=40u64 {
+        insert_draining_writer(
+            &pool,
+            writer_id,
+            now_epoch_secs.saturating_sub(300).saturating_add(writer_id),
+            1,
+            0,
+        )
+        .await;
+    }
+
+    let monitor = tokio::spawn(me_health_monitor(pool.clone(), rng, 0));
+
+    for wave in 0..8u64 {
+        for offset in 0..10u64 {
+            insert_draining_writer(
+                &pool,
+                1000 + wave * 100 + offset,
+                now_epoch_secs.saturating_sub(120).saturating_add(offset),
+                1,
+                0,
+            )
+            .await;
+        }
+        tokio::time::sleep(Duration::from_millis(5)).await;
+    }
+
+    tokio::time::sleep(Duration::from_millis(120)).await;
+    monitor.abort();
+    let _ = monitor.await;
+
+    assert!(writer_count(&pool).await <= threshold as usize);
+}
+
+#[tokio::test]
+async fn me_health_monitor_drains_deadline_storm_with_budgeted_progress() {
+    let (pool, rng) = make_pool(128, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+
+    for writer_id in 1..=220u64 {
+        insert_draining_writer(
+            &pool,
+            writer_id,
+            now_epoch_secs.saturating_sub(120),
+            1,
+            now_epoch_secs.saturating_sub(1),
+        )
+        .await;
+    }
+
+    let monitor = tokio::spawn(me_health_monitor(pool.clone(), rng, 0));
+    tokio::time::sleep(Duration::from_millis(120)).await;
+    monitor.abort();
+    let _ = monitor.await;
+
+    assert_eq!(writer_count(&pool).await, 0);
+}
+
+#[tokio::test]
+async fn me_health_monitor_eliminates_mixed_empty_and_deadline_backlog() {
+    let threshold = 12u64;
+    let (pool, rng) = make_pool(threshold, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+
+    for writer_id in 1..=180u64 {
+        let bound_clients = if writer_id % 3 == 0 { 0 } else { 1 };
+        let deadline = if writer_id % 2 == 0 {
+            now_epoch_secs.saturating_sub(1)
+        } else {
+            0
+        };
+        insert_draining_writer(
+            &pool,
+            writer_id,
+            now_epoch_secs.saturating_sub(250).saturating_add(writer_id),
+            bound_clients,
+            deadline,
+        )
+        .await;
+    }
+
+    let monitor = tokio::spawn(me_health_monitor(pool.clone(), rng, 0));
+    tokio::time::sleep(Duration::from_millis(140)).await;
+    monitor.abort();
+    let _ = monitor.await;
+
+    assert!(writer_count(&pool).await <= threshold as usize);
+}
+
+#[test]
+fn health_drain_close_budget_is_within_expected_bounds() {
+    let budget = health_drain_close_budget();
+    assert!((16..=256).contains(&budget));
+}
src/transport/middle_proxy/health_integration_tests.rs (new file, 227 lines)
@@ -0,0 +1,227 @@
+use std::collections::HashMap;
+use std::net::{IpAddr, Ipv4Addr, SocketAddr};
+use std::sync::Arc;
+use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU32, AtomicU64, Ordering};
+use std::time::{Duration, Instant};
+
+use tokio::sync::mpsc;
+use tokio_util::sync::CancellationToken;
+
+use super::codec::WriterCommand;
+use super::health::health_drain_close_budget;
+use super::pool::{MePool, MeWriter, WriterContour};
+use super::registry::ConnMeta;
+use super::me_health_monitor;
+use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode};
+use crate::crypto::SecureRandom;
+use crate::network::probe::NetworkDecision;
+use crate::stats::Stats;
+
+async fn make_pool(
+    me_pool_drain_threshold: u64,
+    me_health_interval_ms_unhealthy: u64,
+    me_health_interval_ms_healthy: u64,
+) -> (Arc<MePool>, Arc<SecureRandom>) {
+    let general = GeneralConfig {
+        me_pool_drain_threshold,
+        me_health_interval_ms_unhealthy,
+        me_health_interval_ms_healthy,
+        ..GeneralConfig::default()
+    };
+    let rng = Arc::new(SecureRandom::new());
+    let pool = MePool::new(
+        None,
+        vec![1u8; 32],
+        None,
+        false,
+        None,
+        Vec::new(),
+        1,
+        None,
+        12,
+        1200,
+        HashMap::new(),
+        HashMap::new(),
+        None,
+        NetworkDecision::default(),
+        None,
+        rng.clone(),
+        Arc::new(Stats::default()),
+        general.me_keepalive_enabled,
+        general.me_keepalive_interval_secs,
+        general.me_keepalive_jitter_secs,
+        general.me_keepalive_payload_random,
+        general.rpc_proxy_req_every,
+        general.me_warmup_stagger_enabled,
+        general.me_warmup_step_delay_ms,
+        general.me_warmup_step_jitter_ms,
+        general.me_reconnect_max_concurrent_per_dc,
+        general.me_reconnect_backoff_base_ms,
+        general.me_reconnect_backoff_cap_ms,
+        general.me_reconnect_fast_retry_count,
+        general.me_single_endpoint_shadow_writers,
+        general.me_single_endpoint_outage_mode_enabled,
+        general.me_single_endpoint_outage_disable_quarantine,
+        general.me_single_endpoint_outage_backoff_min_ms,
+        general.me_single_endpoint_outage_backoff_max_ms,
+        general.me_single_endpoint_shadow_rotate_every_secs,
+        general.me_floor_mode,
+        general.me_adaptive_floor_idle_secs,
+        general.me_adaptive_floor_min_writers_single_endpoint,
+        general.me_adaptive_floor_min_writers_multi_endpoint,
+        general.me_adaptive_floor_recover_grace_secs,
+        general.me_adaptive_floor_writers_per_core_total,
+        general.me_adaptive_floor_cpu_cores_override,
+        general.me_adaptive_floor_max_extra_writers_single_per_core,
+        general.me_adaptive_floor_max_extra_writers_multi_per_core,
+        general.me_adaptive_floor_max_active_writers_per_core,
+        general.me_adaptive_floor_max_warm_writers_per_core,
+        general.me_adaptive_floor_max_active_writers_global,
+        general.me_adaptive_floor_max_warm_writers_global,
+        general.hardswap,
+        general.me_pool_drain_ttl_secs,
+        general.me_pool_drain_threshold,
+        general.effective_me_pool_force_close_secs(),
+        general.me_pool_min_fresh_ratio,
+        general.me_hardswap_warmup_delay_min_ms,
+        general.me_hardswap_warmup_delay_max_ms,
+        general.me_hardswap_warmup_extra_passes,
+        general.me_hardswap_warmup_pass_backoff_base_ms,
+        general.me_bind_stale_mode,
+        general.me_bind_stale_ttl_secs,
+        general.me_secret_atomic_snapshot,
+        general.me_deterministic_writer_sort,
+        MeWriterPickMode::default(),
+        general.me_writer_pick_sample_size,
+        MeSocksKdfPolicy::default(),
+        general.me_writer_cmd_channel_capacity,
+        general.me_route_channel_capacity,
+        general.me_route_backpressure_base_timeout_ms,
+        general.me_route_backpressure_high_timeout_ms,
+        general.me_route_backpressure_high_watermark_pct,
+        general.me_reader_route_data_wait_ms,
+        general.me_health_interval_ms_unhealthy,
+        general.me_health_interval_ms_healthy,
+        general.me_warn_rate_limit_ms,
+        MeRouteNoWriterMode::default(),
+        general.me_route_no_writer_wait_ms,
+        general.me_route_inline_recovery_attempts,
+        general.me_route_inline_recovery_wait_ms,
+    );
+    (pool, rng)
+}
+
+async fn insert_draining_writer(
+    pool: &Arc<MePool>,
+    writer_id: u64,
+    drain_started_at_epoch_secs: u64,
+    bound_clients: usize,
+    drain_deadline_epoch_secs: u64,
+) {
+    let (tx, _writer_rx) = mpsc::channel::<WriterCommand>(8);
+    let writer = MeWriter {
+        id: writer_id,
+        addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 5500 + writer_id as u16),
+        source_ip: IpAddr::V4(Ipv4Addr::LOCALHOST),
+        writer_dc: 2,
+        generation: 1,
+        contour: Arc::new(AtomicU8::new(WriterContour::Draining.as_u8())),
+        created_at: Instant::now() - Duration::from_secs(writer_id),
+        tx: tx.clone(),
+        cancel: CancellationToken::new(),
+        degraded: Arc::new(AtomicBool::new(false)),
+        rtt_ema_ms_x10: Arc::new(AtomicU32::new(0)),
+        draining: Arc::new(AtomicBool::new(true)),
+        draining_started_at_epoch_secs: Arc::new(AtomicU64::new(drain_started_at_epoch_secs)),
+        drain_deadline_epoch_secs: Arc::new(AtomicU64::new(drain_deadline_epoch_secs)),
+        allow_drain_fallback: Arc::new(AtomicBool::new(false)),
+    };
+    pool.writers.write().await.push(writer);
+    pool.registry.register_writer(writer_id, tx).await;
+    pool.conn_count.fetch_add(1, Ordering::Relaxed);
+    for idx in 0..bound_clients {
+        let (conn_id, _rx) = pool.registry.register().await;
+        assert!(
+            pool.registry
+                .bind_writer(
+                    conn_id,
+                    writer_id,
+                    ConnMeta {
+                        target_dc: 2,
+                        client_addr: SocketAddr::new(
+                            IpAddr::V4(Ipv4Addr::LOCALHOST),
+                            7200 + idx as u16,
+                        ),
+                        our_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443),
+                        proto_flags: 0,
+                    },
+                )
+                .await
+        );
+    }
+}
+
+#[tokio::test]
+async fn me_health_monitor_drains_expired_backlog_over_multiple_cycles() {
+    let (pool, rng) = make_pool(128, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+    let writer_total = health_drain_close_budget().saturating_mul(2).saturating_add(9);
+    for writer_id in 1..=writer_total as u64 {
+        insert_draining_writer(
+            &pool,
+            writer_id,
+            now_epoch_secs.saturating_sub(120),
+            1,
+            now_epoch_secs.saturating_sub(1),
+        )
+        .await;
+    }
+
+    let monitor = tokio::spawn(me_health_monitor(pool.clone(), rng, 0));
+    tokio::time::sleep(Duration::from_millis(60)).await;
+    monitor.abort();
+    let _ = monitor.await;
+
+    assert!(pool.writers.read().await.is_empty());
+}
+
+#[tokio::test]
+async fn me_health_monitor_cleans_empty_draining_writers_without_force_close() {
+    let (pool, rng) = make_pool(128, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+    for writer_id in 1..=24u64 {
+        insert_draining_writer(&pool, writer_id, now_epoch_secs.saturating_sub(60), 0, 0).await;
+    }
+
+    let monitor = tokio::spawn(me_health_monitor(pool.clone(), rng, 0));
+    tokio::time::sleep(Duration::from_millis(30)).await;
+    monitor.abort();
+    let _ = monitor.await;
+
+    assert!(pool.writers.read().await.is_empty());
+}
+
+#[tokio::test]
+async fn me_health_monitor_converges_retry_like_threshold_backlog_to_empty() {
+    let threshold = 4u64;
+    let (pool, rng) = make_pool(threshold, 1, 1).await;
+    let now_epoch_secs = MePool::now_epoch_secs();
+    let writer_total = threshold as usize + health_drain_close_budget().saturating_add(11);
+    for writer_id in 1..=writer_total as u64 {
+        insert_draining_writer(
+            &pool,
+            writer_id,
+            now_epoch_secs.saturating_sub(300).saturating_add(writer_id),
+            1,
+            0,
+        )
+        .await;
+    }
+
+    let monitor = tokio::spawn(me_health_monitor(pool.clone(), rng, 0));
+    tokio::time::sleep(Duration::from_millis(60)).await;
+    monitor.abort();
+    let _ = monitor.await;
+
+    assert!(pool.writers.read().await.is_empty());
+}
src/transport/middle_proxy/health_regression_tests.rs (new file, 462 lines)
@@ -0,0 +1,462 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU32, AtomicU64, Ordering};
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
use tokio_util::sync::CancellationToken;
|
||||||
|
|
||||||
|
use super::codec::WriterCommand;
|
||||||
|
use super::health::{health_drain_close_budget, reap_draining_writers};
|
||||||
|
use super::pool::{MePool, MeWriter, WriterContour};
|
||||||
|
use super::registry::ConnMeta;
|
||||||
|
use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode};
|
||||||
|
use crate::crypto::SecureRandom;
|
||||||
|
use crate::network::probe::NetworkDecision;
|
||||||
|
use crate::stats::Stats;
|
||||||
|
|
||||||
|
async fn make_pool(me_pool_drain_threshold: u64) -> Arc<MePool> {
    let general = GeneralConfig {
        me_pool_drain_threshold,
        ..GeneralConfig::default()
    };

    MePool::new(
        None,
        vec![1u8; 32],
        None,
        false,
        None,
        Vec::new(),
        1,
        None,
        12,
        1200,
        HashMap::new(),
        HashMap::new(),
        None,
        NetworkDecision::default(),
        None,
        Arc::new(SecureRandom::new()),
        Arc::new(Stats::default()),
        general.me_keepalive_enabled,
        general.me_keepalive_interval_secs,
        general.me_keepalive_jitter_secs,
        general.me_keepalive_payload_random,
        general.rpc_proxy_req_every,
        general.me_warmup_stagger_enabled,
        general.me_warmup_step_delay_ms,
        general.me_warmup_step_jitter_ms,
        general.me_reconnect_max_concurrent_per_dc,
        general.me_reconnect_backoff_base_ms,
        general.me_reconnect_backoff_cap_ms,
        general.me_reconnect_fast_retry_count,
        general.me_single_endpoint_shadow_writers,
        general.me_single_endpoint_outage_mode_enabled,
        general.me_single_endpoint_outage_disable_quarantine,
        general.me_single_endpoint_outage_backoff_min_ms,
        general.me_single_endpoint_outage_backoff_max_ms,
        general.me_single_endpoint_shadow_rotate_every_secs,
        general.me_floor_mode,
        general.me_adaptive_floor_idle_secs,
        general.me_adaptive_floor_min_writers_single_endpoint,
        general.me_adaptive_floor_min_writers_multi_endpoint,
        general.me_adaptive_floor_recover_grace_secs,
        general.me_adaptive_floor_writers_per_core_total,
        general.me_adaptive_floor_cpu_cores_override,
        general.me_adaptive_floor_max_extra_writers_single_per_core,
        general.me_adaptive_floor_max_extra_writers_multi_per_core,
        general.me_adaptive_floor_max_active_writers_per_core,
        general.me_adaptive_floor_max_warm_writers_per_core,
        general.me_adaptive_floor_max_active_writers_global,
        general.me_adaptive_floor_max_warm_writers_global,
        general.hardswap,
        general.me_pool_drain_ttl_secs,
        general.me_pool_drain_threshold,
        general.effective_me_pool_force_close_secs(),
        general.me_pool_min_fresh_ratio,
        general.me_hardswap_warmup_delay_min_ms,
        general.me_hardswap_warmup_delay_max_ms,
        general.me_hardswap_warmup_extra_passes,
        general.me_hardswap_warmup_pass_backoff_base_ms,
        general.me_bind_stale_mode,
        general.me_bind_stale_ttl_secs,
        general.me_secret_atomic_snapshot,
        general.me_deterministic_writer_sort,
        MeWriterPickMode::default(),
        general.me_writer_pick_sample_size,
        MeSocksKdfPolicy::default(),
        general.me_writer_cmd_channel_capacity,
        general.me_route_channel_capacity,
        general.me_route_backpressure_base_timeout_ms,
        general.me_route_backpressure_high_timeout_ms,
        general.me_route_backpressure_high_watermark_pct,
        general.me_reader_route_data_wait_ms,
        general.me_health_interval_ms_unhealthy,
        general.me_health_interval_ms_healthy,
        general.me_warn_rate_limit_ms,
        MeRouteNoWriterMode::default(),
        general.me_route_no_writer_wait_ms,
        general.me_route_inline_recovery_attempts,
        general.me_route_inline_recovery_wait_ms,
    )
}
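// Note: `MePool::new` takes its tuning knobs positionally, mirroring the
// `GeneralConfig` field order, so this helper pins only
// `me_pool_drain_threshold` via struct-update syntax and forwards every other
// knob straight from the defaults.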
async fn insert_draining_writer(
    pool: &Arc<MePool>,
    writer_id: u64,
    drain_started_at_epoch_secs: u64,
    bound_clients: usize,
    drain_deadline_epoch_secs: u64,
) -> Vec<u64> {
    let mut conn_ids = Vec::with_capacity(bound_clients);
    let (tx, _writer_rx) = mpsc::channel::<WriterCommand>(8);
    let writer = MeWriter {
        id: writer_id,
        addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 4500 + writer_id as u16),
        source_ip: IpAddr::V4(Ipv4Addr::LOCALHOST),
        writer_dc: 2,
        generation: 1,
        contour: Arc::new(AtomicU8::new(WriterContour::Draining.as_u8())),
        created_at: Instant::now() - Duration::from_secs(writer_id),
        tx: tx.clone(),
        cancel: CancellationToken::new(),
        degraded: Arc::new(AtomicBool::new(false)),
        rtt_ema_ms_x10: Arc::new(AtomicU32::new(0)),
        draining: Arc::new(AtomicBool::new(true)),
        draining_started_at_epoch_secs: Arc::new(AtomicU64::new(drain_started_at_epoch_secs)),
        drain_deadline_epoch_secs: Arc::new(AtomicU64::new(drain_deadline_epoch_secs)),
        allow_drain_fallback: Arc::new(AtomicBool::new(false)),
    };
    pool.writers.write().await.push(writer);
    pool.registry.register_writer(writer_id, tx).await;
    pool.conn_count.fetch_add(1, Ordering::Relaxed);
    for idx in 0..bound_clients {
        let (conn_id, _rx) = pool.registry.register().await;
        assert!(
            pool.registry
                .bind_writer(
                    conn_id,
                    writer_id,
                    ConnMeta {
                        target_dc: 2,
                        client_addr: SocketAddr::new(
                            IpAddr::V4(Ipv4Addr::LOCALHOST),
                            6200 + idx as u16,
                        ),
                        our_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443),
                        proto_flags: 0,
                    },
                )
                .await
        );
        conn_ids.push(conn_id);
    }
    conn_ids
}
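// The helper above fabricates a writer already in the Draining contour,
// registers it with the pool registry, and optionally binds `bound_clients`
// fake client connections so the reaper sees it as non-empty. The returned
// conn ids let tests verify that a force-close also unbinds those clients.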
async fn current_writer_ids(pool: &Arc<MePool>) -> Vec<u64> {
    let mut writer_ids = pool
        .writers
        .read()
        .await
        .iter()
        .map(|writer| writer.id)
        .collect::<Vec<_>>();
    writer_ids.sort_unstable();
    writer_ids
}
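// Sorting the surviving ids keeps the assertions below deterministic even if
// the reaper removes writers in an arbitrary order.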
#[tokio::test]
async fn reap_draining_writers_drops_warn_state_for_removed_writer() {
    let pool = make_pool(128).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    let conn_ids =
        insert_draining_writer(&pool, 7, now_epoch_secs.saturating_sub(180), 1, 0).await;
    let mut warn_next_allowed = HashMap::new();

    reap_draining_writers(&pool, &mut warn_next_allowed).await;
    assert!(warn_next_allowed.contains_key(&7));

    let _ = pool.remove_writer_and_close_clients(7).await;
    assert!(pool.registry.get_writer(conn_ids[0]).await.is_none());

    reap_draining_writers(&pool, &mut warn_next_allowed).await;
    assert!(!warn_next_allowed.contains_key(&7));
}

#[tokio::test]
async fn reap_draining_writers_removes_empty_draining_writers() {
    let pool = make_pool(128).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    insert_draining_writer(&pool, 1, now_epoch_secs.saturating_sub(40), 0, 0).await;
    insert_draining_writer(&pool, 2, now_epoch_secs.saturating_sub(30), 0, 0).await;
    insert_draining_writer(&pool, 3, now_epoch_secs.saturating_sub(20), 1, 0).await;
    let mut warn_next_allowed = HashMap::new();

    reap_draining_writers(&pool, &mut warn_next_allowed).await;

    assert_eq!(current_writer_ids(&pool).await, vec![3]);
}

#[tokio::test]
async fn reap_draining_writers_overflow_closes_oldest_non_empty_writers() {
    let pool = make_pool(2).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    insert_draining_writer(&pool, 11, now_epoch_secs.saturating_sub(40), 1, 0).await;
    insert_draining_writer(&pool, 22, now_epoch_secs.saturating_sub(30), 1, 0).await;
    insert_draining_writer(&pool, 33, now_epoch_secs.saturating_sub(20), 1, 0).await;
    insert_draining_writer(&pool, 44, now_epoch_secs.saturating_sub(10), 1, 0).await;
    let mut warn_next_allowed = HashMap::new();

    reap_draining_writers(&pool, &mut warn_next_allowed).await;

    assert_eq!(current_writer_ids(&pool).await, vec![33, 44]);
}
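// Overflow trimming above is age-ordered: with threshold 2, the two oldest
// draining writers (11 and 22) are closed and the two youngest (33 and 44)
// survive.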
#[tokio::test]
async fn reap_draining_writers_deadline_force_close_applies_under_threshold() {
    let pool = make_pool(128).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    insert_draining_writer(
        &pool,
        50,
        now_epoch_secs.saturating_sub(15),
        1,
        now_epoch_secs.saturating_sub(1),
    )
    .await;
    let mut warn_next_allowed = HashMap::new();

    reap_draining_writers(&pool, &mut warn_next_allowed).await;

    assert!(current_writer_ids(&pool).await.is_empty());
}

#[tokio::test]
async fn reap_draining_writers_limits_closes_per_health_tick() {
    let pool = make_pool(128).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    let close_budget = health_drain_close_budget();
    let writer_total = close_budget.saturating_add(19);
    for writer_id in 1..=writer_total as u64 {
        insert_draining_writer(
            &pool,
            writer_id,
            now_epoch_secs.saturating_sub(20),
            1,
            now_epoch_secs.saturating_sub(1),
        )
        .await;
    }
    let mut warn_next_allowed = HashMap::new();

    reap_draining_writers(&pool, &mut warn_next_allowed).await;

    assert_eq!(pool.writers.read().await.len(), writer_total - close_budget);
}

#[tokio::test]
async fn reap_draining_writers_backlog_drains_across_ticks() {
    let pool = make_pool(128).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    let close_budget = health_drain_close_budget();
    let writer_total = close_budget.saturating_mul(2).saturating_add(7);
    for writer_id in 1..=writer_total as u64 {
        insert_draining_writer(
            &pool,
            writer_id,
            now_epoch_secs.saturating_sub(20),
            1,
            now_epoch_secs.saturating_sub(1),
        )
        .await;
    }
    let mut warn_next_allowed = HashMap::new();

    for _ in 0..8 {
        if pool.writers.read().await.is_empty() {
            break;
        }
        reap_draining_writers(&pool, &mut warn_next_allowed).await;
    }

    assert!(pool.writers.read().await.is_empty());
}
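// Budget arithmetic for the backlog test above: with per-tick close budget B
// and backlog N = 2*B + 7, a full drain takes ceil(N/B) = 2 + ceil(7/B) reap
// ticks, so the 8-iteration loop has headroom for any budget B >= 2.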
#[tokio::test]
async fn reap_draining_writers_threshold_backlog_converges_to_threshold() {
    let threshold = 5u64;
    let pool = make_pool(threshold).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    let close_budget = health_drain_close_budget();
    let writer_total = threshold as usize + close_budget.saturating_add(12);
    for writer_id in 1..=writer_total as u64 {
        insert_draining_writer(
            &pool,
            writer_id,
            now_epoch_secs.saturating_sub(200).saturating_add(writer_id),
            1,
            0,
        )
        .await;
    }
    let mut warn_next_allowed = HashMap::new();

    for _ in 0..16 {
        reap_draining_writers(&pool, &mut warn_next_allowed).await;
        if pool.writers.read().await.len() <= threshold as usize {
            break;
        }
    }

    assert_eq!(pool.writers.read().await.len(), threshold as usize);
}

#[tokio::test]
async fn reap_draining_writers_threshold_zero_preserves_non_expired_non_empty_writers() {
    let pool = make_pool(0).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    insert_draining_writer(&pool, 10, now_epoch_secs.saturating_sub(40), 1, 0).await;
    insert_draining_writer(&pool, 20, now_epoch_secs.saturating_sub(30), 1, 0).await;
    insert_draining_writer(&pool, 30, now_epoch_secs.saturating_sub(20), 1, 0).await;
    let mut warn_next_allowed = HashMap::new();

    reap_draining_writers(&pool, &mut warn_next_allowed).await;

    assert_eq!(current_writer_ids(&pool).await, vec![10, 20, 30]);
}

#[tokio::test]
async fn reap_draining_writers_prioritizes_force_close_before_empty_cleanup() {
    let pool = make_pool(128).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    let close_budget = health_drain_close_budget();
    for writer_id in 1..=close_budget as u64 {
        insert_draining_writer(
            &pool,
            writer_id,
            now_epoch_secs.saturating_sub(20),
            1,
            now_epoch_secs.saturating_sub(1),
        )
        .await;
    }
    let empty_writer_id = close_budget as u64 + 1;
    insert_draining_writer(&pool, empty_writer_id, now_epoch_secs.saturating_sub(20), 0, 0).await;
    let mut warn_next_allowed = HashMap::new();

    reap_draining_writers(&pool, &mut warn_next_allowed).await;

    assert_eq!(current_writer_ids(&pool).await, vec![empty_writer_id]);
}
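// In the test above the whole per-tick budget is spent on expired-deadline
// force-closes, so the equally old but empty writer survives this tick:
// force-close takes priority over empty cleanup.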
#[tokio::test]
async fn reap_draining_writers_empty_cleanup_does_not_increment_force_close_metric() {
    let pool = make_pool(128).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    insert_draining_writer(&pool, 1, now_epoch_secs.saturating_sub(60), 0, 0).await;
    insert_draining_writer(&pool, 2, now_epoch_secs.saturating_sub(50), 0, 0).await;
    let mut warn_next_allowed = HashMap::new();

    reap_draining_writers(&pool, &mut warn_next_allowed).await;

    assert!(current_writer_ids(&pool).await.is_empty());
    assert_eq!(pool.stats.get_pool_force_close_total(), 0);
}

#[tokio::test]
async fn reap_draining_writers_handles_duplicate_force_close_requests_for_same_writer() {
    let pool = make_pool(1).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    insert_draining_writer(
        &pool,
        10,
        now_epoch_secs.saturating_sub(30),
        1,
        now_epoch_secs.saturating_sub(1),
    )
    .await;
    insert_draining_writer(
        &pool,
        20,
        now_epoch_secs.saturating_sub(20),
        1,
        now_epoch_secs.saturating_sub(1),
    )
    .await;
    let mut warn_next_allowed = HashMap::new();

    reap_draining_writers(&pool, &mut warn_next_allowed).await;

    assert!(current_writer_ids(&pool).await.is_empty());
}

#[tokio::test]
async fn reap_draining_writers_warn_state_never_exceeds_live_draining_population_under_churn() {
    let pool = make_pool(128).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    let mut warn_next_allowed = HashMap::new();

    for wave in 0..12u64 {
        for offset in 0..9u64 {
            insert_draining_writer(
                &pool,
                wave * 100 + offset,
                now_epoch_secs.saturating_sub(120 + offset),
                1,
                0,
            )
            .await;
        }
        reap_draining_writers(&pool, &mut warn_next_allowed).await;
        assert!(warn_next_allowed.len() <= pool.writers.read().await.len());

        let existing_writer_ids = current_writer_ids(&pool).await;
        for writer_id in existing_writer_ids.into_iter().take(4) {
            let _ = pool.remove_writer_and_close_clients(writer_id).await;
        }
        reap_draining_writers(&pool, &mut warn_next_allowed).await;
        assert!(warn_next_allowed.len() <= pool.writers.read().await.len());
    }
}
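// The churn test above re-checks after every wave that warn state never
// outgrows the live draining population, both after inserts and after
// out-of-band force-removals.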
#[tokio::test]
async fn reap_draining_writers_mixed_backlog_converges_without_leaking_warn_state() {
    let pool = make_pool(6).await;
    let now_epoch_secs = MePool::now_epoch_secs();
    let mut warn_next_allowed = HashMap::new();

    for writer_id in 1..=18u64 {
        let bound_clients = if writer_id % 3 == 0 { 0 } else { 1 };
        let deadline = if writer_id % 2 == 0 {
            now_epoch_secs.saturating_sub(1)
        } else {
            0
        };
        insert_draining_writer(
            &pool,
            writer_id,
            now_epoch_secs.saturating_sub(300).saturating_add(writer_id),
            bound_clients,
            deadline,
        )
        .await;
    }

    for _ in 0..16 {
        reap_draining_writers(&pool, &mut warn_next_allowed).await;
        if pool.writers.read().await.len() <= 6 {
            break;
        }
    }

    assert!(pool.writers.read().await.len() <= 6);
    assert!(warn_next_allowed.len() <= pool.writers.read().await.len());
}

#[test]
fn general_config_default_drain_threshold_remains_enabled() {
    assert_eq!(GeneralConfig::default().me_pool_drain_threshold, 128);
}
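Taken together, the regression tests pin down the reaper's policy: expired deadlines are force-closed first under a per-tick budget, empty draining writers are cleaned without counting as force-closes, and a non-zero `me_pool_drain_threshold` caps how many non-empty draining writers may linger, closing the oldest first, while `0` disables the cap. A standalone sketch of just the trimming rule (not the crate's implementation; ages are drain ages in seconds):

```rust
/// Keep at most `threshold` draining entries, dropping the oldest first;
/// `threshold == 0` means "no cap", mirroring `make_pool(0)` above.
fn trim_to_threshold(mut drain_ages_secs: Vec<u64>, threshold: usize) -> Vec<u64> {
    if threshold == 0 || drain_ages_secs.len() <= threshold {
        return drain_ages_secs;
    }
    drain_ages_secs.sort_unstable(); // ascending age: youngest writers first
    drain_ages_secs.truncate(threshold); // drop the oldest overflow entries
    drain_ages_secs
}
```

For example, `trim_to_threshold(vec![40, 30, 20, 10], 2)` keeps the two youngest ages `[10, 20]`, matching the overflow test where writers 33 and 44 survive.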
src/transport/middle_proxy/mod.rs
@@ -21,6 +21,12 @@ mod secret;
 mod selftest;
 mod wire;
 mod pool_status;
+#[cfg(test)]
+mod health_regression_tests;
+#[cfg(test)]
+mod health_integration_tests;
+#[cfg(test)]
+mod health_adversarial_tests;
 
 use bytes::Bytes;
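The hunk above registers the three health test modules behind `#[cfg(test)]`, so they compile only under `cargo test` and add nothing to release builds. The general shape of the pattern (module name here is illustrative):

```rust
// In a directory module's mod.rs: the submodule file is parsed and compiled
// only when the `test` cfg is active, e.g. via `cargo test`.
#[cfg(test)]
mod some_regression_tests; // expects some_regression_tests.rs beside mod.rs
```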
src/transport/middle_proxy/pool.rs
@@ -171,6 +171,7 @@ pub struct MePool {
     pub(super) endpoint_quarantine: Arc<Mutex<HashMap<SocketAddr, Instant>>>,
     pub(super) kdf_material_fingerprint: Arc<RwLock<HashMap<SocketAddr, (u64, u16)>>>,
     pub(super) me_pool_drain_ttl_secs: AtomicU64,
+    pub(super) me_pool_drain_threshold: AtomicU64,
     pub(super) me_pool_force_close_secs: AtomicU64,
     pub(super) me_pool_min_fresh_ratio_permille: AtomicU32,
     pub(super) me_hardswap_warmup_delay_min_ms: AtomicU64,
@@ -271,6 +272,7 @@ impl MePool {
         me_adaptive_floor_max_warm_writers_global: u32,
         hardswap: bool,
         me_pool_drain_ttl_secs: u64,
+        me_pool_drain_threshold: u64,
         me_pool_force_close_secs: u64,
         me_pool_min_fresh_ratio: f32,
         me_hardswap_warmup_delay_min_ms: u64,
@@ -446,6 +448,7 @@ impl MePool {
             endpoint_quarantine: Arc::new(Mutex::new(HashMap::new())),
             kdf_material_fingerprint: Arc::new(RwLock::new(HashMap::new())),
             me_pool_drain_ttl_secs: AtomicU64::new(me_pool_drain_ttl_secs),
+            me_pool_drain_threshold: AtomicU64::new(me_pool_drain_threshold),
             me_pool_force_close_secs: AtomicU64::new(me_pool_force_close_secs),
             me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille(
                 me_pool_min_fresh_ratio,
@@ -492,6 +495,7 @@ impl MePool {
         &self,
         hardswap: bool,
         drain_ttl_secs: u64,
+        pool_drain_threshold: u64,
         force_close_secs: u64,
         min_fresh_ratio: f32,
         hardswap_warmup_delay_min_ms: u64,
@@ -530,6 +534,8 @@ impl MePool {
         self.hardswap.store(hardswap, Ordering::Relaxed);
         self.me_pool_drain_ttl_secs
             .store(drain_ttl_secs, Ordering::Relaxed);
+        self.me_pool_drain_threshold
+            .store(pool_drain_threshold, Ordering::Relaxed);
         self.me_pool_force_close_secs
             .store(force_close_secs, Ordering::Relaxed);
         self.me_pool_min_fresh_ratio_permille
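The pool.rs hunks thread the new `me_pool_drain_threshold` knob through the struct, the constructor, and the hot-reload setter as an `AtomicU64` stored and loaded with relaxed ordering, the same pattern as the neighbouring drain-TTL and force-close knobs. A minimal self-contained sketch of that hot-reload pattern (names are illustrative, not the crate's API):

```rust
use std::sync::atomic::{AtomicU64, Ordering};

/// Illustrative runtime-tunable knob: the config-reload path stores a new
/// value, the health loop loads the current one on every tick, no locks.
struct DrainKnobs {
    drain_threshold: AtomicU64,
}

impl DrainKnobs {
    fn new(initial: u64) -> Self {
        Self { drain_threshold: AtomicU64::new(initial) }
    }

    /// Called from the config-reload path.
    fn apply(&self, new_threshold: u64) {
        // Relaxed suffices: readers only need some recent value, with no
        // ordering requirements against other memory operations.
        self.drain_threshold.store(new_threshold, Ordering::Relaxed);
    }

    /// Called from the health tick.
    fn current(&self) -> u64 {
        self.drain_threshold.load(Ordering::Relaxed)
    }
}
```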