Conntrack Control for Docker

This commit is contained in:
Alexey
2026-04-17 19:06:18 +03:00
parent 3ca3e8ff0e
commit 093faed0c2
7 changed files with 152 additions and 35 deletions

View File

@@ -82,6 +82,32 @@ HEALTHCHECK --interval=30s --timeout=5s --start-period=20s --retries=3 CMD ["/ap
ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]
# ==========================
# Production Netfilter Profile
# ==========================
# Production image variant that bundles the netfilter userspace tooling
# (conntrack, nftables, iptables) the inline conntrack controller shells
# out to at runtime. Requires NET_ADMIN at run time to actually mutate
# conntrack/netfilter state (granted via compose cap_add).
FROM debian:12-slim AS prod-netfilter
# Single RUN layer: install tools, then drop apt lists to keep the image small.
RUN set -eux; \
apt-get update; \
apt-get install -y --no-install-recommends \
ca-certificates \
conntrack \
nftables \
iptables; \
rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Reuse the binary built in the `minimal` stage rather than rebuilding here.
COPY --from=minimal /telemt /app/telemt
COPY config.toml /app/config.toml
# 443: proxy listener; 9090/9091: presumably metrics/admin — TODO confirm.
EXPOSE 443 9090 9091
# Liveness probe via the binary's built-in healthcheck subcommand.
HEALTHCHECK --interval=30s --timeout=5s --start-period=20s --retries=3 CMD ["/app/telemt", "healthcheck", "/app/config.toml", "--mode", "liveness"]
ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]
# ==========================
# Production Distroless on MUSL
# ==========================

View File

@@ -0,0 +1,10 @@
# Compose override: run telemt with host networking so the inline conntrack
# controller can manage the host's netfilter/conntrack state directly.
services:
  telemt:
    build:
      context: .
      # Image stage that ships the conntrack/nftables/iptables tooling.
      target: prod-netfilter
    # Share the host's network namespace; port mappings do not apply here.
    network_mode: host
    # Explicitly clear port mappings — they are ignored with network_mode: host.
    ports: []
    cap_add:
      # Bind to privileged ports (e.g. 443) without full root.
      - NET_BIND_SERVICE
      # Required to modify conntrack entries and netfilter rules.
      - NET_ADMIN

View File

@@ -0,0 +1,8 @@
# Compose override: build the netfilter-enabled image and grant the
# capabilities the inline conntrack controller needs (bridge networking).
services:
  telemt:
    build:
      context: .
      # Image stage that ships the conntrack/nftables/iptables tooling.
      target: prod-netfilter
    cap_add:
      # Bind to privileged ports (e.g. 443) without full root.
      - NET_BIND_SERVICE
      # Required to modify conntrack entries and netfilter rules.
      - NET_ADMIN

View File

@@ -1,7 +1,9 @@
services:
telemt:
image: ghcr.io/telemt/telemt:latest
build: .
build:
context: .
target: prod
container_name: telemt
restart: unless-stopped
ports:
@@ -28,7 +30,6 @@ services:
- ALL
cap_add:
- NET_BIND_SERVICE
- NET_ADMIN
read_only: true
security_opt:
- no-new-privileges:true

View File

@@ -343,6 +343,10 @@ impl ProxyConfig {
let network_table = parsed_toml
.get("network")
.and_then(|value| value.as_table());
let server_table = parsed_toml.get("server").and_then(|value| value.as_table());
let conntrack_control_table = server_table
.and_then(|table| table.get("conntrack_control"))
.and_then(|value| value.as_table());
let update_every_is_explicit = general_table
.map(|table| table.contains_key("update_every"))
.unwrap_or(false);
@@ -372,10 +376,17 @@ impl ProxyConfig {
let stun_servers_is_explicit = network_table
.map(|table| table.contains_key("stun_servers"))
.unwrap_or(false);
let inline_conntrack_control_is_explicit = conntrack_control_table
.map(|table| table.contains_key("inline_conntrack_control"))
.unwrap_or(false);
let mut config: ProxyConfig = parsed_toml
.try_into()
.map_err(|e| ProxyError::Config(e.to_string()))?;
config
.server
.conntrack_control
.inline_conntrack_control_explicit = inline_conntrack_control_is_explicit;
if !update_every_is_explicit && (legacy_secret_is_explicit || legacy_config_is_explicit) {
config.general.update_every = None;
@@ -1881,6 +1892,35 @@ mod tests {
);
}
#[test]
// When [server.conntrack_control] exists but inline_conntrack_control is not
// set, the serde-skipped `inline_conntrack_control_explicit` marker must stay
// false so the runtime treats the value as a default, not an operator choice.
fn conntrack_inline_explicit_flag_is_false_when_omitted() {
let cfg = load_config_from_temp_toml(
r#"
[general]
[network]
[server]
[server.conntrack_control]
[access]
"#,
);
assert!(!cfg.server.conntrack_control.inline_conntrack_control_explicit);
}
#[test]
// When inline_conntrack_control is written out in the TOML, the explicit-set
// marker must be true — this is what gates the "explicitly enabled but
// unsupported/unavailable" warnings in the controller.
fn conntrack_inline_explicit_flag_is_true_when_present() {
let cfg = load_config_from_temp_toml(
r#"
[general]
[network]
[server]
[server.conntrack_control]
inline_conntrack_control = true
[access]
"#,
);
assert!(cfg.server.conntrack_control.inline_conntrack_control_explicit);
}
#[test]
fn unknown_sni_action_parses_and_defaults_to_drop() {
let cfg_default: ProxyConfig = toml::from_str(

View File

@@ -1329,6 +1329,10 @@ pub struct ConntrackControlConfig {
#[serde(default = "default_conntrack_control_enabled")]
pub inline_conntrack_control: bool,
/// Tracks whether inline_conntrack_control was explicitly set in config.
#[serde(skip)]
pub inline_conntrack_control_explicit: bool,
/// Conntrack mode for listener ingress traffic.
#[serde(default)]
pub mode: ConntrackMode,
@@ -1363,6 +1367,7 @@ impl Default for ConntrackControlConfig {
fn default() -> Self {
Self {
inline_conntrack_control: default_conntrack_control_enabled(),
inline_conntrack_control_explicit: false,
mode: ConntrackMode::default(),
backend: ConntrackBackend::default(),
profile: ConntrackPressureProfile::default(),

View File

@@ -24,6 +24,13 @@ enum NetfilterBackend {
Iptables,
}
#[derive(Clone, Copy)]
/// Snapshot of the host facilities required for inline conntrack control,
/// gathered by `probe_runtime_support`.
struct ConntrackRuntimeSupport {
    /// Netfilter backend chosen by `pick_backend`, or `None` if unavailable.
    netfilter_backend: Option<NetfilterBackend>,
    /// Whether the process holds CAP_NET_ADMIN (per `has_cap_net_admin`).
    has_cap_net_admin: bool,
    /// Whether the `conntrack` CLI binary was found on the PATH.
    has_conntrack_binary: bool,
}
#[derive(Clone, Copy)]
struct PressureSample {
conn_pct: Option<u8>,
@@ -56,11 +63,8 @@ pub(crate) fn spawn_conntrack_controller(
shared: Arc<ProxySharedState>,
) {
if !cfg!(target_os = "linux") {
let enabled = config_rx
.borrow()
.server
.conntrack_control
.inline_conntrack_control;
let cfg = config_rx.borrow();
let enabled = cfg.server.conntrack_control.inline_conntrack_control;
stats.set_conntrack_control_enabled(enabled);
stats.set_conntrack_control_available(false);
stats.set_conntrack_pressure_active(false);
@@ -68,9 +72,9 @@ pub(crate) fn spawn_conntrack_controller(
stats.set_conntrack_rule_apply_ok(false);
shared.disable_conntrack_close_sender();
shared.set_conntrack_pressure_active(false);
if enabled {
if enabled && cfg.server.conntrack_control.inline_conntrack_control_explicit {
warn!(
"conntrack control is configured but unsupported on this OS; disabling runtime worker"
"conntrack control explicitly enabled but unsupported on this OS; disabling runtime worker"
);
}
return;
@@ -92,16 +96,17 @@ async fn run_conntrack_controller(
let mut cfg = config_rx.borrow().clone();
let mut pressure_state = PressureState::new(stats.as_ref());
let mut delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
let mut backend = pick_backend(cfg.server.conntrack_control.backend);
let mut runtime_support = probe_runtime_support(cfg.server.conntrack_control.backend);
let mut effective_enabled = effective_conntrack_enabled(&cfg, runtime_support);
apply_runtime_state(
stats.as_ref(),
shared.as_ref(),
&cfg,
backend.is_some(),
runtime_support,
false,
);
reconcile_rules(&cfg, backend, stats.as_ref()).await;
reconcile_rules(&cfg, runtime_support, stats.as_ref()).await;
loop {
tokio::select! {
@@ -110,17 +115,18 @@ async fn run_conntrack_controller(
break;
}
cfg = config_rx.borrow_and_update().clone();
backend = pick_backend(cfg.server.conntrack_control.backend);
runtime_support = probe_runtime_support(cfg.server.conntrack_control.backend);
effective_enabled = effective_conntrack_enabled(&cfg, runtime_support);
delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
apply_runtime_state(stats.as_ref(), shared.as_ref(), &cfg, backend.is_some(), pressure_state.active);
reconcile_rules(&cfg, backend, stats.as_ref()).await;
apply_runtime_state(stats.as_ref(), shared.as_ref(), &cfg, runtime_support, pressure_state.active);
reconcile_rules(&cfg, runtime_support, stats.as_ref()).await;
}
event = close_rx.recv() => {
let Some(event) = event else {
break;
};
stats.set_conntrack_event_queue_depth(close_rx.len() as u64);
if !cfg.server.conntrack_control.inline_conntrack_control {
if !effective_enabled {
continue;
}
if !pressure_state.active {
@@ -156,6 +162,7 @@ async fn run_conntrack_controller(
stats.as_ref(),
shared.as_ref(),
&cfg,
effective_enabled,
&sample,
&mut pressure_state,
);
@@ -175,20 +182,24 @@ fn apply_runtime_state(
stats: &Stats,
shared: &ProxySharedState,
cfg: &ProxyConfig,
backend_available: bool,
runtime_support: ConntrackRuntimeSupport,
pressure_active: bool,
) {
let enabled = cfg.server.conntrack_control.inline_conntrack_control;
let available = enabled && backend_available && has_cap_net_admin();
if enabled && !available {
let available = effective_conntrack_enabled(cfg, runtime_support);
if enabled && !available && cfg.server.conntrack_control.inline_conntrack_control_explicit {
warn!(
"conntrack control enabled but unavailable (missing CAP_NET_ADMIN or backend binaries)"
has_cap_net_admin = runtime_support.has_cap_net_admin,
backend_available = runtime_support.netfilter_backend.is_some(),
conntrack_binary_available = runtime_support.has_conntrack_binary,
configured_backend = ?cfg.server.conntrack_control.backend,
"conntrack control explicitly enabled but unavailable; disabling runtime features"
);
}
stats.set_conntrack_control_enabled(enabled);
stats.set_conntrack_control_available(available);
shared.set_conntrack_pressure_active(enabled && pressure_active);
stats.set_conntrack_pressure_active(enabled && pressure_active);
shared.set_conntrack_pressure_active(available && pressure_active);
stats.set_conntrack_pressure_active(available && pressure_active);
}
fn collect_pressure_sample(
@@ -228,10 +239,11 @@ fn update_pressure_state(
stats: &Stats,
shared: &ProxySharedState,
cfg: &ProxyConfig,
effective_enabled: bool,
sample: &PressureSample,
state: &mut PressureState,
) {
if !cfg.server.conntrack_control.inline_conntrack_control {
if !effective_enabled {
if state.active {
state.active = false;
state.low_streak = 0;
@@ -285,22 +297,22 @@ fn update_pressure_state(
state.low_streak = 0;
}
async fn reconcile_rules(cfg: &ProxyConfig, backend: Option<NetfilterBackend>, stats: &Stats) {
async fn reconcile_rules(cfg: &ProxyConfig, runtime_support: ConntrackRuntimeSupport, stats: &Stats) {
if !cfg.server.conntrack_control.inline_conntrack_control {
clear_notrack_rules_all_backends().await;
stats.set_conntrack_rule_apply_ok(true);
return;
}
if !has_cap_net_admin() {
if !effective_conntrack_enabled(cfg, runtime_support) {
clear_notrack_rules_all_backends().await;
stats.set_conntrack_rule_apply_ok(false);
return;
}
let Some(backend) = backend else {
stats.set_conntrack_rule_apply_ok(false);
return;
};
let backend = runtime_support
.netfilter_backend
.expect("netfilter backend must be available for effective conntrack control");
let apply_result = match backend {
NetfilterBackend::Nftables => apply_nft_rules(cfg).await,
@@ -315,6 +327,21 @@ async fn reconcile_rules(cfg: &ProxyConfig, backend: Option<NetfilterBackend>, s
}
}
/// Probe the host for everything inline conntrack control needs at runtime:
/// a usable netfilter backend for the configured preference, CAP_NET_ADMIN,
/// and the `conntrack` CLI binary on the PATH.
fn probe_runtime_support(configured_backend: ConntrackBackend) -> ConntrackRuntimeSupport {
    // Probe each prerequisite independently so callers can log exactly
    // which one is missing.
    let netfilter_backend = pick_backend(configured_backend);
    let has_cap_net_admin = has_cap_net_admin();
    let has_conntrack_binary = command_exists("conntrack");
    ConntrackRuntimeSupport {
        netfilter_backend,
        has_cap_net_admin,
        has_conntrack_binary,
    }
}
/// Returns true only when conntrack control is both enabled in the config
/// and fully supported by the host (CAP_NET_ADMIN, a netfilter backend,
/// and the `conntrack` binary all present).
fn effective_conntrack_enabled(cfg: &ProxyConfig, runtime_support: ConntrackRuntimeSupport) -> bool {
    // Config switch wins first: if disabled, runtime support is irrelevant.
    if !cfg.server.conntrack_control.inline_conntrack_control {
        return false;
    }
    runtime_support.has_cap_net_admin
        && runtime_support.netfilter_backend.is_some()
        && runtime_support.has_conntrack_binary
}
fn pick_backend(configured: ConntrackBackend) -> Option<NetfilterBackend> {
match configured {
ConntrackBackend::Auto => {
@@ -710,7 +737,7 @@ mod tests {
me_queue_pressure_delta: 0,
};
update_pressure_state(&stats, shared.as_ref(), &cfg, &sample, &mut state);
update_pressure_state(&stats, shared.as_ref(), &cfg, true, &sample, &mut state);
assert!(state.active);
assert!(shared.conntrack_pressure_active());
@@ -731,7 +758,7 @@ mod tests {
accept_timeout_delta: 0,
me_queue_pressure_delta: 0,
};
update_pressure_state(&stats, shared.as_ref(), &cfg, &high_sample, &mut state);
update_pressure_state(&stats, shared.as_ref(), &cfg, true, &high_sample, &mut state);
assert!(state.active);
let low_sample = PressureSample {
@@ -740,11 +767,11 @@ mod tests {
accept_timeout_delta: 0,
me_queue_pressure_delta: 0,
};
update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);
update_pressure_state(&stats, shared.as_ref(), &cfg, true, &low_sample, &mut state);
assert!(state.active);
update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);
update_pressure_state(&stats, shared.as_ref(), &cfg, true, &low_sample, &mut state);
assert!(state.active);
update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);
update_pressure_state(&stats, shared.as_ref(), &cfg, true, &low_sample, &mut state);
assert!(!state.active);
assert!(!shared.conntrack_pressure_active());
@@ -765,7 +792,7 @@ mod tests {
me_queue_pressure_delta: 10,
};
update_pressure_state(&stats, shared.as_ref(), &cfg, &sample, &mut state);
update_pressure_state(&stats, shared.as_ref(), &cfg, false, &sample, &mut state);
assert!(!state.active);
assert!(!shared.conntrack_pressure_active());