From d6ac5fbd9cb6dd1931f31b607cec29a8deb4bf48 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 21 Aug 2024 13:13:59 -1000
Subject: [PATCH] scx_layered: Drop SCX_OPS_ENQ_LAST

The meaning of SCX_OPS_ENQ_LAST will change with future kernel updates and
enqueueing on local DSQ will no longer be sufficient to avoid stalls. No
reason to do it anyway. Just drop it.
---
 scheds/rust/scx_layered/src/bpf/intf.h     | 1 -
 scheds/rust/scx_layered/src/bpf/main.bpf.c | 7 -------
 scheds/rust/scx_layered/src/stats.rs       | 8 +-------
 3 files changed, 1 insertion(+), 15 deletions(-)

diff --git a/scheds/rust/scx_layered/src/bpf/intf.h b/scheds/rust/scx_layered/src/bpf/intf.h
index 59320d2..a58b16e 100644
--- a/scheds/rust/scx_layered/src/bpf/intf.h
+++ b/scheds/rust/scx_layered/src/bpf/intf.h
@@ -53,7 +53,6 @@ enum layer_stat_idx {
 	LSTAT_SEL_LOCAL,
 	LSTAT_ENQ_WAKEUP,
 	LSTAT_ENQ_EXPIRE,
-	LSTAT_ENQ_LAST,
 	LSTAT_ENQ_REENQ,
 	LSTAT_MIN_EXEC,
 	LSTAT_MIN_EXEC_NS,
diff --git a/scheds/rust/scx_layered/src/bpf/main.bpf.c b/scheds/rust/scx_layered/src/bpf/main.bpf.c
index 9fadfeb..2192d74 100644
--- a/scheds/rust/scx_layered/src/bpf/main.bpf.c
+++ b/scheds/rust/scx_layered/src/bpf/main.bpf.c
@@ -667,12 +667,6 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 	if (enq_flags & SCX_ENQ_REENQ) {
 		lstat_inc(LSTAT_ENQ_REENQ, layer, cctx);
 	} else {
-		if (enq_flags & SCX_ENQ_LAST) {
-			lstat_inc(LSTAT_ENQ_LAST, layer, cctx);
-			scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, 0);
-			return;
-		}
-
 		if (enq_flags & SCX_ENQ_WAKEUP)
 			lstat_inc(LSTAT_ENQ_WAKEUP, layer, cctx);
 		else
@@ -1698,5 +1692,4 @@ SCX_OPS_DEFINE(layered,
 	       .dump			= (void *)layered_dump,
 	       .init			= (void *)layered_init,
 	       .exit			= (void *)layered_exit,
-	       .flags			= SCX_OPS_ENQ_LAST,
 	       .name			= "layered");
diff --git a/scheds/rust/scx_layered/src/stats.rs b/scheds/rust/scx_layered/src/stats.rs
index 8cd88c1..e75f386 100644
--- a/scheds/rust/scx_layered/src/stats.rs
+++ b/scheds/rust/scx_layered/src/stats.rs
@@ -70,8 +70,6 @@ pub struct LayerStats {
     pub enq_wakeup: f64,
     #[stat(desc = "layer: % enqueued after slice expiration")]
     pub enq_expire: f64,
-    #[stat(desc = "layer: % enqueued as last runnable task on CPU")]
-    pub enq_last: f64,
     #[stat(desc = "layer: % re-enqueued due to RT preemption")]
     pub enq_reenq: f64,
     #[stat(desc = "layer: # times exec duration < min_exec_us")]
@@ -148,7 +146,6 @@ impl LayerStats {
         let ltotal = lstat(bpf_intf::layer_stat_idx_LSTAT_SEL_LOCAL)
             + lstat(bpf_intf::layer_stat_idx_LSTAT_ENQ_WAKEUP)
             + lstat(bpf_intf::layer_stat_idx_LSTAT_ENQ_EXPIRE)
-            + lstat(bpf_intf::layer_stat_idx_LSTAT_ENQ_LAST)
             + lstat(bpf_intf::layer_stat_idx_LSTAT_ENQ_REENQ);
         let lstat_pct = |sidx| {
             if ltotal != 0 {
@@ -181,7 +178,6 @@
             sel_local: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_SEL_LOCAL),
             enq_wakeup: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_ENQ_WAKEUP),
             enq_expire: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_ENQ_EXPIRE),
-            enq_last: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_ENQ_LAST),
             enq_reenq: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_ENQ_REENQ),
             min_exec: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_MIN_EXEC),
             min_exec_us: (lstat(bpf_intf::layer_stat_idx_LSTAT_MIN_EXEC_NS) / 1000) as u64,
@@ -223,13 +219,12 @@ impl LayerStats {
         writeln!(
             w,
-            " {:
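
Note: for reference, the enqueue path that remains after this change is sketched
below. This is only an illustration distilled from the surviving context lines in
the hunk above; layer, cctx, lstat_inc() and the remainder of layered_enqueue(),
which queues the task onto its layer's queue, are the existing scx_layered pieces
and are elided or assumed here. With the SCX_ENQ_LAST special case gone, a task
that is the last runnable one on its CPU is accounted as a normal wakeup or slice
expiration and follows the regular queueing path instead of being dispatched
straight to SCX_DSQ_LOCAL.

	/* Post-change flow in layered_enqueue(): no SCX_ENQ_LAST fast path. */
	if (enq_flags & SCX_ENQ_REENQ) {
		lstat_inc(LSTAT_ENQ_REENQ, layer, cctx);
	} else {
		if (enq_flags & SCX_ENQ_WAKEUP)
			lstat_inc(LSTAT_ENQ_WAKEUP, layer, cctx);
		else
			lstat_inc(LSTAT_ENQ_EXPIRE, layer, cctx);
	}
	/* ... the rest of layered_enqueue() then queues p as usual ... */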