ME Pool Updater + Soft-staged Reinit w/o Reconcile

Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
Alexey 2026-02-23 16:04:19 +03:00
parent d08ddd718a
commit d8dcbbb61e
8 changed files with 336 additions and 60 deletions

View File

@@ -195,6 +195,8 @@ fast_mode = true
use_middle_proxy = false
log_level = "normal"
desync_all_full = false
+update_every = 43200
+me_reinit_drain_timeout_secs = 300

[network]
ipv4 = true

View File

@@ -171,6 +171,14 @@ pub(crate) fn default_proxy_config_reload_secs() -> u64 {
    12 * 60 * 60
}

+pub(crate) fn default_update_every_secs() -> u64 {
+    12 * 60 * 60
+}
+
+pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
+    300
+}
+
pub(crate) fn default_ntp_check() -> bool {
    true
}

View File

@@ -11,6 +11,8 @@
//! | `general` | `middle_proxy_pool_size` | Passed on next connection |
//! | `general` | `me_keepalive_*` | Passed on next connection |
//! | `general` | `desync_all_full` | Applied immediately |
+//! | `general` | `update_every` | Applied to ME updater immediately |
+//! | `general` | `me_reinit_drain_timeout_secs` | Applied on next ME map update |
//! | `access` | All user/quota fields | Effective immediately |
//!
//! Fields that require re-binding sockets (`server.port`, `censorship.*`,
@@ -36,6 +38,8 @@ pub struct HotFields {
    pub ad_tag: Option<String>,
    pub middle_proxy_pool_size: usize,
    pub desync_all_full: bool,
+    pub update_every_secs: u64,
+    pub me_reinit_drain_timeout_secs: u64,
    pub me_keepalive_enabled: bool,
    pub me_keepalive_interval_secs: u64,
    pub me_keepalive_jitter_secs: u64,
@@ -50,6 +54,8 @@ impl HotFields {
            ad_tag: cfg.general.ad_tag.clone(),
            middle_proxy_pool_size: cfg.general.middle_proxy_pool_size,
            desync_all_full: cfg.general.desync_all_full,
+            update_every_secs: cfg.general.effective_update_every_secs(),
+            me_reinit_drain_timeout_secs: cfg.general.me_reinit_drain_timeout_secs,
            me_keepalive_enabled: cfg.general.me_keepalive_enabled,
            me_keepalive_interval_secs: cfg.general.me_keepalive_interval_secs,
            me_keepalive_jitter_secs: cfg.general.me_keepalive_jitter_secs,
@@ -185,6 +191,20 @@ fn log_changes(
        );
    }
+    if old_hot.update_every_secs != new_hot.update_every_secs {
+        info!(
+            "config reload: update_every(effective): {}s → {}s",
+            old_hot.update_every_secs, new_hot.update_every_secs,
+        );
+    }
+    if old_hot.me_reinit_drain_timeout_secs != new_hot.me_reinit_drain_timeout_secs {
+        info!(
+            "config reload: me_reinit_drain_timeout_secs: {}s → {}s",
+            old_hot.me_reinit_drain_timeout_secs, new_hot.me_reinit_drain_timeout_secs,
+        );
+    }
    if old_hot.me_keepalive_enabled != new_hot.me_keepalive_enabled
        || old_hot.me_keepalive_interval_secs != new_hot.me_keepalive_interval_secs
        || old_hot.me_keepalive_jitter_secs != new_hot.me_keepalive_jitter_secs
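
For orientation, a minimal sketch (not part of this commit) of how a consumer could react to the two new hot fields arriving over a config `watch` channel. The struct here is a hypothetical trimmed-down snapshot, not the real `HotFields`; only the field names mirror the diff above.

```rust
use std::sync::Arc;
use tokio::sync::watch;

// Hypothetical trimmed-down snapshot, mirroring the two new fields above.
struct HotSnapshot {
    update_every_secs: u64,
    me_reinit_drain_timeout_secs: u64,
}

// Re-reads the snapshot whenever the config channel changes.
async fn watch_hot_fields(mut rx: watch::Receiver<Arc<HotSnapshot>>) {
    let mut current = rx.borrow().clone();
    while rx.changed().await.is_ok() {
        let next = rx.borrow().clone();
        if next.update_every_secs != current.update_every_secs {
            // Per the table above, `update_every` applies to the ME updater immediately.
            println!("updater interval now {}s", next.update_every_secs);
        }
        if next.me_reinit_drain_timeout_secs != current.me_reinit_drain_timeout_secs {
            // The drain timeout only takes effect on the next ME map update.
            println!("drain timeout now {}s", next.me_reinit_drain_timeout_secs);
        }
        current = next;
    }
}
```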

View File

@@ -117,6 +117,34 @@ impl ProxyConfig {
        let mut config: ProxyConfig =
            toml::from_str(&processed).map_err(|e| ProxyError::Config(e.to_string()))?;

+        if let Some(update_every) = config.general.update_every {
+            if update_every == 0 {
+                return Err(ProxyError::Config(
+                    "general.update_every must be > 0".to_string(),
+                ));
+            }
+        } else {
+            let legacy_secret = config.general.proxy_secret_auto_reload_secs;
+            let legacy_config = config.general.proxy_config_auto_reload_secs;
+            let effective = legacy_secret.min(legacy_config);
+            if effective == 0 {
+                return Err(ProxyError::Config(
+                    "legacy proxy_*_auto_reload_secs values must be > 0 when general.update_every is not set".to_string(),
+                ));
+            }
+            if legacy_secret != default_proxy_secret_reload_secs()
+                || legacy_config != default_proxy_config_reload_secs()
+            {
+                warn!(
+                    proxy_secret_auto_reload_secs = legacy_secret,
+                    proxy_config_auto_reload_secs = legacy_config,
+                    effective_update_every_secs = effective,
+                    "proxy_*_auto_reload_secs are deprecated; set general.update_every"
+                );
+            }
+        }
+
        // Validate secrets.
        for (user, secret) in &config.access.users {
            if !secret.chars().all(|c| c.is_ascii_hexdigit()) || secret.len() != 32 {
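
To make the fallback rules above concrete, here is a self-contained sketch restating the same validation on plain values (the function and error strings are illustrative, not the crate's API):

```rust
// Illustrative restatement of the validation above: an explicit
// update_every wins; otherwise the smaller legacy value is used.
fn effective_interval(
    update_every: Option<u64>,
    legacy_secret: u64,
    legacy_config: u64,
) -> Result<u64, String> {
    match update_every {
        Some(0) => Err("general.update_every must be > 0".into()),
        Some(secs) => Ok(secs),
        None => {
            let effective = legacy_secret.min(legacy_config);
            if effective == 0 {
                Err("legacy proxy_*_auto_reload_secs values must be > 0".into())
            } else {
                Ok(effective)
            }
        }
    }
}

fn main() {
    assert_eq!(effective_interval(Some(43200), 1, 1), Ok(43200));
    assert_eq!(effective_interval(None, 43200, 21600), Ok(21600)); // min() wins
    assert!(effective_interval(Some(0), 43200, 43200).is_err());
    assert!(effective_interval(None, 0, 43200).is_err());
}
```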

View File

@@ -257,11 +257,23 @@ pub struct GeneralConfig {
    #[serde(default = "default_fast_mode_min_tls_record")]
    pub fast_mode_min_tls_record: usize,
-    /// Automatically reload proxy-secret every N seconds.
+    /// Unified ME updater interval in seconds for getProxyConfig/getProxyConfigV6/getProxySecret.
+    /// When omitted, the effective value falls back to the legacy proxy_*_auto_reload_secs fields.
+    #[serde(default)]
+    pub update_every: Option<u64>,
+    /// Drain timeout in seconds for stale ME writers after endpoint map changes.
+    /// Set to 0 to keep stale writers draining indefinitely (no force-close).
+    #[serde(default = "default_me_reinit_drain_timeout_secs")]
+    pub me_reinit_drain_timeout_secs: u64,
+    /// Deprecated legacy setting; kept as a backward-compatibility fallback.
+    /// Use `update_every` instead.
    #[serde(default = "default_proxy_secret_reload_secs")]
    pub proxy_secret_auto_reload_secs: u64,
-    /// Automatically reload proxy-multi.conf every N seconds.
+    /// Deprecated legacy setting; kept as a backward-compatibility fallback.
+    /// Use `update_every` instead.
    #[serde(default = "default_proxy_config_reload_secs")]
    pub proxy_config_auto_reload_secs: u64,
@@ -317,6 +329,8 @@ impl Default for GeneralConfig {
            max_client_frame: default_max_client_frame(),
            desync_all_full: default_desync_all_full(),
            fast_mode_min_tls_record: default_fast_mode_min_tls_record(),
+            update_every: Some(default_update_every_secs()),
+            me_reinit_drain_timeout_secs: default_me_reinit_drain_timeout_secs(),
            proxy_secret_auto_reload_secs: default_proxy_secret_reload_secs(),
            proxy_config_auto_reload_secs: default_proxy_config_reload_secs(),
            ntp_check: default_ntp_check(),
@@ -327,6 +341,13 @@ impl Default for GeneralConfig {
    }
}

+impl GeneralConfig {
+    pub fn effective_update_every_secs(&self) -> u64 {
+        self.update_every
+            .unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs))
+    }
+}
+
/// `[general.links]` — proxy link generation settings.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct LinksConfig {
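
`update_every` is deliberately an `Option<u64>` with a plain `#[serde(default)]`, so an omitted key stays `None` and the legacy fields decide. A trimmed-down sketch of that interaction, using a hypothetical stand-in struct and assuming the `serde` and `toml` crates:

```rust
use serde::Deserialize;

// Hypothetical stand-in for GeneralConfig, just to show the serde behavior.
#[derive(Deserialize)]
struct General {
    #[serde(default)]
    update_every: Option<u64>,
    #[serde(default = "default_reload")]
    proxy_secret_auto_reload_secs: u64,
    #[serde(default = "default_reload")]
    proxy_config_auto_reload_secs: u64,
}

fn default_reload() -> u64 {
    12 * 60 * 60
}

impl General {
    fn effective_update_every_secs(&self) -> u64 {
        self.update_every.unwrap_or_else(|| {
            self.proxy_secret_auto_reload_secs
                .min(self.proxy_config_auto_reload_secs)
        })
    }
}

fn main() {
    // Key omitted: the Option stays None and the legacy fields decide (min wins).
    let legacy: General = toml::from_str("proxy_config_auto_reload_secs = 600").unwrap();
    assert_eq!(legacy.effective_update_every_secs(), 600);

    // Key present: it wins outright.
    let unified: General = toml::from_str("update_every = 43200").unwrap();
    assert_eq!(unified.effective_update_every_secs(), 43200);
}
```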

View File

@@ -392,18 +392,6 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).await
                .await;
            });
-            // Periodic updater: getProxyConfig + proxy-secret
-            let pool_clone2 = pool.clone();
-            let rng_clone2 = rng.clone();
-            tokio::spawn(async move {
-                crate::transport::middle_proxy::me_config_updater(
-                    pool_clone2,
-                    rng_clone2,
-                    std::time::Duration::from_secs(12 * 3600),
-                )
-                .await;
-            });
            Some(pool)
        }
        Err(e) => {
@@ -702,6 +690,20 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).await
        detected_ip_v6,
    );

+    if let Some(ref pool) = me_pool {
+        let pool_clone = pool.clone();
+        let rng_clone = rng.clone();
+        let config_rx_clone = config_rx.clone();
+        tokio::spawn(async move {
+            crate::transport::middle_proxy::me_config_updater(
+                pool_clone,
+                rng_clone,
+                config_rx_clone,
+            )
+            .await;
+        });
+    }
+
    let mut listeners = Vec::new();
    for listener_conf in &config.server.listeners {
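
The updater is now driven by a `watch::Receiver<Arc<ProxyConfig>>` rather than a fixed `Duration`. A rough wiring sketch, assuming the reload path holds the matching `watch::Sender`; all names here are illustrative stand-ins:

```rust
use std::sync::Arc;
use tokio::sync::watch;

// Hypothetical minimal stand-ins for the real pool/config types.
struct MePool;
struct ProxyConfig;

async fn me_config_updater(_pool: Arc<MePool>, _rx: watch::Receiver<Arc<ProxyConfig>>) {
    // real loop elided
}

#[tokio::main]
async fn main() {
    // The reload path keeps `config_tx` and sends each new Arc<ProxyConfig>.
    let (config_tx, config_rx) = watch::channel(Arc::new(ProxyConfig));
    let pool = Arc::new(MePool);

    // Each spawned task gets its own cloned receiver, as in the diff above.
    tokio::spawn(me_config_updater(pool.clone(), config_rx.clone()));

    // Later, on config reload:
    let _ = config_tx.send(Arc::new(ProxyConfig));
}
```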

View File

@@ -4,8 +4,10 @@ use std::sync::Arc;
use std::time::Duration;

use httpdate;
+use tokio::sync::watch;
use tracing::{debug, info, warn};

+use crate::config::ProxyConfig;
use crate::error::Result;

use super::MePool;
@@ -128,23 +130,20 @@ pub async fn fetch_proxy_config(url: &str) -> Result<ProxyConfigData> {
    Ok(ProxyConfigData { map, default_dc })
}

-pub async fn me_config_updater(pool: Arc<MePool>, rng: Arc<SecureRandom>, interval: Duration) {
-    let mut tick = tokio::time::interval(interval);
-    // skip immediate tick to avoid double-fetch right after startup
-    tick.tick().await;
-    loop {
-        tick.tick().await;
+async fn run_update_cycle(pool: &Arc<MePool>, rng: &Arc<SecureRandom>, cfg: &ProxyConfig) {
+    let mut maps_changed = false;
    // Update proxy config v4
    let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await;
-    if let Some(cfg) = cfg_v4 {
-        let changed = pool.update_proxy_maps(cfg.map.clone(), None).await;
-        if let Some(dc) = cfg.default_dc {
-            pool.default_dc.store(dc, std::sync::atomic::Ordering::Relaxed);
+    if let Some(cfg_v4) = cfg_v4 {
+        let changed = pool.update_proxy_maps(cfg_v4.map.clone(), None).await;
+        if let Some(dc) = cfg_v4.default_dc {
+            pool.default_dc
+                .store(dc, std::sync::atomic::Ordering::Relaxed);
        }
        if changed {
-            info!("ME config updated (v4), reconciling connections");
-            pool.reconcile_connections(&rng).await;
+            maps_changed = true;
+            info!("ME config updated (v4)");
        } else {
            debug!("ME config v4 unchanged");
        }
@@ -155,12 +154,23 @@ pub async fn me_config_updater(pool: Arc<MePool>, rng: Arc<SecureRandom>, interval: Duration)
    if let Some(cfg_v6) = cfg_v6 {
        let changed = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await;
        if changed {
-            info!("ME config updated (v6), reconciling connections");
-            pool.reconcile_connections(&rng).await;
+            maps_changed = true;
+            info!("ME config updated (v6)");
        } else {
            debug!("ME config v6 unchanged");
        }
    }

+    if maps_changed {
+        let drain_timeout = if cfg.general.me_reinit_drain_timeout_secs == 0 {
+            None
+        } else {
+            Some(Duration::from_secs(cfg.general.me_reinit_drain_timeout_secs))
+        };
+        pool.zero_downtime_reinit_after_map_change(rng.as_ref(), drain_timeout)
+            .await;
+    }
+
    pool.reset_stun_state();

    // Update proxy-secret
@@ -172,6 +182,75 @@ pub async fn me_config_updater(pool: Arc<MePool>, rng: Arc<SecureRandom>, interval: Duration)
        }
        Err(e) => warn!(error = %e, "proxy-secret update failed"),
    }
+}
+
+pub async fn me_config_updater(
+    pool: Arc<MePool>,
+    rng: Arc<SecureRandom>,
+    mut config_rx: watch::Receiver<Arc<ProxyConfig>>,
+) {
+    let mut update_every_secs = config_rx
+        .borrow()
+        .general
+        .effective_update_every_secs()
+        .max(1);
+    let mut update_every = Duration::from_secs(update_every_secs);
+    let mut next_tick = tokio::time::Instant::now() + update_every;
+    info!(update_every_secs, "ME config updater started");
+
+    loop {
+        let sleep = tokio::time::sleep_until(next_tick);
+        tokio::pin!(sleep);
+
+        tokio::select! {
+            _ = &mut sleep => {
+                let cfg = config_rx.borrow().clone();
+                run_update_cycle(&pool, &rng, cfg.as_ref()).await;
+                let refreshed_secs = cfg.general.effective_update_every_secs().max(1);
+                if refreshed_secs != update_every_secs {
+                    info!(
+                        old_update_every_secs = update_every_secs,
+                        new_update_every_secs = refreshed_secs,
+                        "ME config updater interval changed"
+                    );
+                    update_every_secs = refreshed_secs;
+                    update_every = Duration::from_secs(update_every_secs);
+                }
+                next_tick = tokio::time::Instant::now() + update_every;
+            }
+            changed = config_rx.changed() => {
+                if changed.is_err() {
+                    warn!("ME config updater stopped: config channel closed");
+                    break;
+                }
+                let cfg = config_rx.borrow().clone();
+                let new_secs = cfg.general.effective_update_every_secs().max(1);
+                if new_secs == update_every_secs {
+                    continue;
+                }
+                if new_secs < update_every_secs {
+                    info!(
+                        old_update_every_secs = update_every_secs,
+                        new_update_every_secs = new_secs,
+                        "ME config updater interval decreased, running immediate refresh"
+                    );
+                    update_every_secs = new_secs;
+                    update_every = Duration::from_secs(update_every_secs);
+                    run_update_cycle(&pool, &rng, cfg.as_ref()).await;
+                    next_tick = tokio::time::Instant::now() + update_every;
+                } else {
+                    info!(
+                        old_update_every_secs = update_every_secs,
+                        new_update_every_secs = new_secs,
+                        "ME config updater interval increased"
+                    );
+                    update_every_secs = new_secs;
+                    update_every = Duration::from_secs(update_every_secs);
+                    next_tick = tokio::time::Instant::now() + update_every;
+                }
+            }
+        }
    }
}
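
Stripped of the ME specifics, the scheduling skeleton above — one `select!` racing a `sleep_until` deadline against config changes — reduces to the following illustrative sketch, where `do_work` stands in for `run_update_cycle`:

```rust
use std::time::Duration;
use tokio::sync::watch;
use tokio::time::{sleep_until, Instant};

async fn do_work() { /* stand-in for run_update_cycle */ }

async fn scheduler(mut interval_rx: watch::Receiver<Duration>) {
    let mut every = *interval_rx.borrow();
    let mut next_tick = Instant::now() + every;
    loop {
        let sleep = sleep_until(next_tick);
        tokio::pin!(sleep);
        tokio::select! {
            // Deadline reached: run one cycle, then schedule the next.
            _ = &mut sleep => {
                do_work().await;
                next_tick = Instant::now() + every;
            }
            // Interval changed under us: shrinking triggers an immediate
            // cycle, growing just pushes the deadline out.
            changed = interval_rx.changed() => {
                if changed.is_err() { break; } // sender dropped
                let new_every = *interval_rx.borrow();
                if new_every < every {
                    do_work().await;
                }
                every = new_every;
                next_tick = Instant::now() + every;
            }
        }
    }
}
```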

View File

@@ -1,4 +1,4 @@
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU64, AtomicUsize, Ordering};
@@ -178,7 +178,6 @@ impl MePool {
    }

    pub async fn reconcile_connections(self: &Arc<Self>, rng: &SecureRandom) {
-        use std::collections::HashSet;
        let writers = self.writers.read().await;
        let current: HashSet<SocketAddr> = writers
            .iter()
@@ -210,6 +209,101 @@
        }
    }

+    async fn desired_dc_endpoints(&self) -> HashMap<i32, HashSet<SocketAddr>> {
+        let mut out: HashMap<i32, HashSet<SocketAddr>> = HashMap::new();
+        if self.decision.ipv4_me {
+            let map_v4 = self.proxy_map_v4.read().await.clone();
+            for (dc, addrs) in map_v4 {
+                let entry = out.entry(dc.abs()).or_default();
+                for (ip, port) in addrs {
+                    entry.insert(SocketAddr::new(ip, port));
+                }
+            }
+        }
+        if self.decision.ipv6_me {
+            let map_v6 = self.proxy_map_v6.read().await.clone();
+            for (dc, addrs) in map_v6 {
+                let entry = out.entry(dc.abs()).or_default();
+                for (ip, port) in addrs {
+                    entry.insert(SocketAddr::new(ip, port));
+                }
+            }
+        }
+        out
+    }
+
+    pub async fn zero_downtime_reinit_after_map_change(
+        self: &Arc<Self>,
+        rng: &SecureRandom,
+        drain_timeout: Option<Duration>,
+    ) {
+        self.reconcile_connections(rng).await;
+
+        let desired_by_dc = self.desired_dc_endpoints().await;
+        if desired_by_dc.is_empty() {
+            warn!("ME endpoint map is empty after update; skipping stale writer drain");
+            return;
+        }
+
+        let writers = self.writers.read().await;
+        let active_writer_addrs: HashSet<SocketAddr> = writers
+            .iter()
+            .filter(|w| !w.draining.load(Ordering::Relaxed))
+            .map(|w| w.addr)
+            .collect();
+
+        let mut missing_dc = Vec::<i32>::new();
+        for (dc, endpoints) in &desired_by_dc {
+            if endpoints.is_empty() {
+                continue;
+            }
+            if !endpoints.iter().any(|addr| active_writer_addrs.contains(addr)) {
+                missing_dc.push(*dc);
+            }
+        }
+        if !missing_dc.is_empty() {
+            missing_dc.sort_unstable();
+            warn!(
+                missing_dc = ?missing_dc,
+                "ME reinit coverage incomplete after map update; keeping stale writers"
+            );
+            return;
+        }
+
+        let desired_addrs: HashSet<SocketAddr> = desired_by_dc
+            .values()
+            .flat_map(|set| set.iter().copied())
+            .collect();
+        let stale_writer_ids: Vec<u64> = writers
+            .iter()
+            .filter(|w| !w.draining.load(Ordering::Relaxed))
+            .filter(|w| !desired_addrs.contains(&w.addr))
+            .map(|w| w.id)
+            .collect();
+        drop(writers);
+
+        if stale_writer_ids.is_empty() {
+            debug!("ME map update completed with no stale writers");
+            return;
+        }
+
+        let drain_timeout_secs = drain_timeout.map(|d| d.as_secs()).unwrap_or(0);
+        info!(
+            stale_writers = stale_writer_ids.len(),
+            drain_timeout_secs,
+            "ME map update covered; draining stale writers"
+        );
+        for writer_id in stale_writer_ids {
+            self.mark_writer_draining_with_timeout(writer_id, drain_timeout)
+                .await;
+        }
+    }
+
    pub async fn update_proxy_maps(
        &self,
        new_v4: HashMap<i32, Vec<(IpAddr, u16)>>,
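
The guard above drains old writers only when every DC in the new map is already served by at least one active (non-draining) writer. Reduced to plain sets, the coverage check looks like this illustrative sketch:

```rust
use std::collections::{HashMap, HashSet};
use std::net::SocketAddr;

// Returns the DCs that have desired endpoints but no active writer yet.
// Draining stale writers is only safe when this comes back empty.
fn uncovered_dcs(
    desired_by_dc: &HashMap<i32, HashSet<SocketAddr>>,
    active_writer_addrs: &HashSet<SocketAddr>,
) -> Vec<i32> {
    let mut missing: Vec<i32> = desired_by_dc
        .iter()
        .filter(|(_, endpoints)| !endpoints.is_empty())
        .filter(|(_, endpoints)| {
            !endpoints.iter().any(|addr| active_writer_addrs.contains(addr))
        })
        .map(|(dc, _)| *dc)
        .collect();
    missing.sort_unstable();
    missing
}
```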
@@ -631,24 +725,41 @@
        self.registry.writer_lost(writer_id).await
    }

-    pub(crate) async fn mark_writer_draining(self: &Arc<Self>, writer_id: u64) {
-        {
+    pub(crate) async fn mark_writer_draining_with_timeout(
+        self: &Arc<Self>,
+        writer_id: u64,
+        timeout: Option<Duration>,
+    ) {
+        let timeout = timeout.filter(|d| !d.is_zero());
+        let found = {
            let mut ws = self.writers.write().await;
            if let Some(w) = ws.iter_mut().find(|w| w.id == writer_id) {
                w.draining.store(true, Ordering::Relaxed);
+                true
+            } else {
+                false
            }
+        };
+        if !found {
+            return;
        }
+        let timeout_secs = timeout.map(|d| d.as_secs()).unwrap_or(0);
+        debug!(writer_id, timeout_secs, "ME writer marked draining");

        let pool = Arc::downgrade(self);
        tokio::spawn(async move {
-            let deadline = Instant::now() + Duration::from_secs(300);
+            let deadline = timeout.map(|t| Instant::now() + t);
            loop {
                if let Some(p) = pool.upgrade() {
-                    if Instant::now() >= deadline {
-                        warn!(writer_id, "Drain timeout, force-closing");
-                        let _ = p.remove_writer_and_close_clients(writer_id).await;
-                        break;
+                    if let Some(deadline_at) = deadline {
+                        if Instant::now() >= deadline_at {
+                            warn!(writer_id, "Drain timeout, force-closing");
+                            let _ = p.remove_writer_and_close_clients(writer_id).await;
+                            break;
+                        }
                    }
                    if p.registry.is_writer_empty(writer_id).await {
                        let _ = p.remove_writer_only(writer_id).await;
                        break;
@@ -661,6 +772,11 @@
        });
    }

+    pub(crate) async fn mark_writer_draining(self: &Arc<Self>, writer_id: u64) {
+        self.mark_writer_draining_with_timeout(writer_id, Some(Duration::from_secs(300)))
+            .await;
+    }
}

fn hex_dump(data: &[u8]) -> String {
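
A detail worth keeping in mind: in `mark_writer_draining_with_timeout`, `timeout.filter(|d| !d.is_zero())` normalizes a configured `0` to `None`, meaning "drain indefinitely, never force-close". A tiny sketch of that normalization, with a hypothetical helper name:

```rust
use std::time::Duration;

// Maps the configured seconds to the drain policy used above:
// 0 → None (never force-close), N → Some(N seconds).
fn drain_policy(secs: u64) -> Option<Duration> {
    Some(Duration::from_secs(secs)).filter(|d| !d.is_zero())
}

fn main() {
    assert_eq!(drain_policy(0), None);                             // drain indefinitely
    assert_eq!(drain_policy(300), Some(Duration::from_secs(300))); // force-close after 5 min
}
```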