Mirror of https://github.com/sched-ext/scx.git (synced 2024-11-28 13:40:28 +00:00)

Merge pull request #671 from hodgesds/layered-last-waker

scx_layered: Add waker stats per layer

Commit 8b14e48994
@@ -75,6 +75,8 @@ enum layer_stat_idx {
	LSTAT_MIGRATION,
	LSTAT_XNUMA_MIGRATION,
	LSTAT_XLLC_MIGRATION,
	LSTAT_XLAYER_WAKE,
	LSTAT_XLAYER_REWAKE,
	NR_LSTATS,
};
@@ -371,6 +371,7 @@ struct task_ctx {
	int pid;
	int last_cpu;
	int layer;
	pid_t last_waker;
	bool refresh_layer;
	u64 layer_cpus_seq;
	struct bpf_cpumask __kptr *layered_cpumask;
@@ -1456,11 +1457,38 @@ static s32 create_cache(u32 cache_id)
	return ret;
}

static __always_inline
void on_wakeup(struct task_struct *p, struct task_ctx *tctx)
{
	struct cpu_ctx *cctx;
	struct layer *layer;
	struct task_ctx *waker_tctx;
	struct task_struct *waker;

	if (!(cctx = lookup_cpu_ctx(-1)) ||
	    !(layer = lookup_layer(tctx->layer)))
		return;

	if (!(waker = bpf_get_current_task_btf()) ||
	    !(waker_tctx = lookup_task_ctx_may_fail(waker)))
		return;

	// TODO: add handling for per layer wakers
	if (tctx->layer == waker_tctx->layer)
		return;

	if (tctx->last_waker == waker->pid)
		lstat_inc(LSTAT_XLAYER_REWAKE, layer, cctx);

	tctx->last_waker = waker->pid;
	lstat_inc(LSTAT_XLAYER_WAKE, layer, cctx);
}

void BPF_STRUCT_OPS(layered_runnable, struct task_struct *p, u64 enq_flags)
{
	struct task_ctx *tctx;
	u64 now = bpf_ktime_get_ns();

	if (!(tctx = lookup_task_ctx(p)))
		return;
@@ -1468,6 +1496,9 @@ void BPF_STRUCT_OPS(layered_runnable, struct task_struct *p, u64 enq_flags)
	tctx->runnable_at = now;
	maybe_refresh_layer(p, tctx);
	adj_load(tctx->layer, p->scx.weight, now);

	if (enq_flags & SCX_ENQ_WAKEUP)
		on_wakeup(p, tctx);
}

void BPF_STRUCT_OPS(layered_running, struct task_struct *p)
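on_wakeup() accounts the cross-layer wake events through lstat_inc(), a helper that is not part of this diff. A minimal sketch of the kind of per-CPU, per-layer counter bump it presumably performs (the cpu_ctx layout, layer->idx, and MAX_LAYERS below are assumptions for illustration, not the scheduler's actual definitions):

	/*
	 * Hypothetical sketch only; the real lstat_inc() in scx_layered may differ.
	 * Assumes: struct cpu_ctx { u64 layer_stats[MAX_LAYERS][NR_LSTATS]; ... };
	 */
	static __always_inline void lstat_inc(enum layer_stat_idx idx,
					      struct layer *layer,
					      struct cpu_ctx *cctx)
	{
		/* stats are kept per CPU and per layer, then summed in userspace */
		if (layer->idx >= 0 && layer->idx < MAX_LAYERS && idx < NR_LSTATS)
			cctx->layer_stats[layer->idx][idx]++;
	}

On the userspace side (the Rust hunks below), these counters are aggregated per layer and the new ones are reported via lstat_pct() as percentages, alongside the existing migration stats.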
@@ -111,6 +111,10 @@ pub struct LayerStats {
    pub xnuma_migration: f64,
    #[stat(desc = "% migrated across LLCs")]
    pub xllc_migration: f64,
    #[stat(desc = "% wakers across layers")]
    pub xlayer_wake: f64,
    #[stat(desc = "% rewakers across layers where waker has waken the task previously")]
    pub xlayer_rewake: f64,
    #[stat(desc = "mask of allocated CPUs", _om_skip)]
    pub cpus: Vec<u32>,
    #[stat(desc = "# of CPUs assigned")]
@@ -199,6 +203,8 @@ impl LayerStats {
            yield_ignore: lstat(bpf_intf::layer_stat_idx_LSTAT_YIELD_IGNORE) as u64,
            migration: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_MIGRATION),
            xnuma_migration: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XNUMA_MIGRATION),
            xlayer_wake: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XLAYER_WAKE),
            xlayer_rewake: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XLAYER_REWAKE),
            xllc_migration: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XLLC_MIGRATION),
            cpus: Self::bitvec_to_u32s(&layer.cpus),
            cur_nr_cpus: layer.cpus.count_ones() as u32,
@@ -232,6 +238,15 @@ impl LayerStats {
            width = header_width,
        )?;

        writeln!(
            w,
            " {:<width$} xlayer_wake={} xlayer_rewake={}",
            "",
            fmt_pct(self.xlayer_wake),
            fmt_pct(self.xlayer_rewake),
            width = header_width,
        )?;

        writeln!(
            w,
            " {:<width$} keep/max/busy={}/{}/{} kick={} yield/ign={}/{}",
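With these fields wired up, each layer's stat dump gains one extra line; with illustrative values it would render roughly as "xlayer_wake=12.3 xlayer_rewake=1.4", padded to header_width so it lines up under the existing migration and keep/kick lines.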