Mirror of https://github.com/sched-ext/scx.git (synced 2024-11-28 21:50:23 +00:00)

Merge branch 'sched-ext:main' into scx_loader_automatic

Commit ab1c737e9e
@@ -75,6 +75,8 @@ enum layer_stat_idx {
 	LSTAT_MIGRATION,
 	LSTAT_XNUMA_MIGRATION,
 	LSTAT_XLLC_MIGRATION,
+	LSTAT_XLAYER_WAKE,
+	LSTAT_XLAYER_REWAKE,
 	NR_LSTATS,
 };
 
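For context (not part of the diff): NR_LSTATS is the sentinel that sizes the per-layer counter array, so the two new entries must sit before it. A minimal standalone C sketch of that indexing pattern follows; the array name and the reduced set of enum entries are chosen here for illustration only.

#include <stdio.h>

enum layer_stat_idx {
	LSTAT_MIGRATION,
	LSTAT_XNUMA_MIGRATION,
	LSTAT_XLLC_MIGRATION,
	LSTAT_XLAYER_WAKE,
	LSTAT_XLAYER_REWAKE,
	NR_LSTATS,
};

int main(void)
{
	/* one slot per counter; NR_LSTATS grows automatically with the enum */
	unsigned long long lstats[NR_LSTATS] = { 0 };

	lstats[LSTAT_XLAYER_WAKE]++;
	printf("NR_LSTATS=%d xlayer_wake=%llu\n", NR_LSTATS, lstats[LSTAT_XLAYER_WAKE]);
	return 0;
}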
@@ -371,6 +371,7 @@ struct task_ctx {
 	int pid;
 	int last_cpu;
 	int layer;
+	pid_t last_waker;
 	bool refresh_layer;
 	u64 layer_cpus_seq;
 	struct bpf_cpumask __kptr *layered_cpumask;
@@ -1456,11 +1457,38 @@ static s32 create_cache(u32 cache_id)
 	return ret;
 }
 
+static __always_inline
+void on_wakeup(struct task_struct *p, struct task_ctx *tctx)
+{
+	struct cpu_ctx *cctx;
+	struct layer *layer;
+	struct task_ctx *waker_tctx;
+	struct task_struct *waker;
+
+	if (!(cctx = lookup_cpu_ctx(-1)) ||
+	    !(layer = lookup_layer(tctx->layer)))
+		return;
+
+	if (!(waker = bpf_get_current_task_btf()) ||
+	    !(waker_tctx = lookup_task_ctx_may_fail(waker)))
+		return;
+
+	// TODO: add handling for per layer wakers
+	if (tctx->layer == waker_tctx->layer)
+		return;
+
+	if (tctx->last_waker == waker->pid)
+		lstat_inc(LSTAT_XLAYER_REWAKE, layer, cctx);
+
+	tctx->last_waker = waker->pid;
+	lstat_inc(LSTAT_XLAYER_WAKE, layer, cctx);
+}
+
 void BPF_STRUCT_OPS(layered_runnable, struct task_struct *p, u64 enq_flags)
 {
-	u64 now = bpf_ktime_get_ns();
 	struct task_ctx *tctx;
+	u64 now = bpf_ktime_get_ns();
 
 	if (!(tctx = lookup_task_ctx(p)))
 		return;
 
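A standalone sketch (not from this commit) of the counting behaviour that on_wakeup() implements: every cross-layer wakeup bumps the wake counter, and a wakeup coming from the same waker PID as the previous one additionally bumps the rewake counter. The function and variable names below are hypothetical; in the scheduler the counters are per-CPU layer stats updated via lstat_inc() and the waker PID is remembered in task_ctx->last_waker.

#include <stdio.h>

static int last_waker = -1;			/* mirrors tctx->last_waker */
static unsigned long xlayer_wake, xlayer_rewake;

/* hypothetical stand-in for the cross-layer branch of on_wakeup() */
static void on_cross_layer_wake(int waker_pid)
{
	if (last_waker == waker_pid)
		xlayer_rewake++;		/* same waker as the previous wakeup */

	last_waker = waker_pid;
	xlayer_wake++;				/* every cross-layer wakeup is counted */
}

int main(void)
{
	on_cross_layer_wake(100);	/* first wake by pid 100 */
	on_cross_layer_wake(100);	/* pid 100 again -> also a rewake */
	on_cross_layer_wake(200);	/* different waker -> wake only */
	printf("xlayer_wake=%lu xlayer_rewake=%lu\n", xlayer_wake, xlayer_rewake);
	return 0;
}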
@@ -1468,6 +1496,9 @@ void BPF_STRUCT_OPS(layered_runnable, struct task_struct *p, u64 enq_flags)
 	tctx->runnable_at = now;
 	maybe_refresh_layer(p, tctx);
 	adj_load(tctx->layer, p->scx.weight, now);
+
+	if (enq_flags & SCX_ENQ_WAKEUP)
+		on_wakeup(p, tctx);
 }
 
 void BPF_STRUCT_OPS(layered_running, struct task_struct *p)
@@ -1770,6 +1801,7 @@ void BPF_STRUCT_OPS(layered_dump, struct scx_dump_ctx *dctx)
 			scx_bpf_dump("LAYER[%d]DSQ[%d] nr_cpus=%u nr_queued=%d -%llums cpus=",
 				     i, idx, layers[i].nr_cpus, scx_bpf_dsq_nr_queued(idx),
 				     dsq_first_runnable_for_ms(idx, now));
+			scx_bpf_dump("\n");
 		}
 	}
 	dump_layer_cpumask(i);
@@ -111,6 +111,10 @@ pub struct LayerStats {
     pub xnuma_migration: f64,
     #[stat(desc = "% migrated across LLCs")]
     pub xllc_migration: f64,
+    #[stat(desc = "% wakers across layers")]
+    pub xlayer_wake: f64,
+    #[stat(desc = "% rewakers across layers where waker has waken the task previously")]
+    pub xlayer_rewake: f64,
     #[stat(desc = "mask of allocated CPUs", _om_skip)]
     pub cpus: Vec<u32>,
     #[stat(desc = "# of CPUs assigned")]
@@ -199,6 +203,8 @@ impl LayerStats {
             yield_ignore: lstat(bpf_intf::layer_stat_idx_LSTAT_YIELD_IGNORE) as u64,
             migration: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_MIGRATION),
             xnuma_migration: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XNUMA_MIGRATION),
+            xlayer_wake: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XLAYER_WAKE),
+            xlayer_rewake: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XLAYER_REWAKE),
             xllc_migration: lstat_pct(bpf_intf::layer_stat_idx_LSTAT_XLLC_MIGRATION),
             cpus: Self::bitvec_to_u32s(&layer.cpus),
             cur_nr_cpus: layer.cpus.count_ones() as u32,
@@ -232,6 +238,15 @@ impl LayerStats {
             width = header_width,
         )?;
 
+        writeln!(
+            w,
+            "  {:<width$} xlayer_wake={} xlayer_rewake={}",
+            "",
+            fmt_pct(self.xlayer_wake),
+            fmt_pct(self.xlayer_rewake),
+            width = header_width,
+        )?;
+
         writeln!(
             w,
             "  {:<width$} keep/max/busy={}/{}/{} kick={} yield/ign={}/{}",
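On the userspace side, lstat_pct() converts the raw BPF counters into the percentages stored in xlayer_wake and xlayer_rewake; the exact denominator it uses is not visible in this diff. A hedged C sketch of that kind of normalization, with the helper name and the totals chosen arbitrarily for illustration:

#include <stdio.h>

/* hedged model of a percentage conversion similar in spirit to lstat_pct();
 * the denominator (total events for the layer) is an assumption */
static double as_pct(unsigned long long events, unsigned long long total)
{
	return total ? 100.0 * (double)events / (double)total : 0.0;
}

int main(void)
{
	/* e.g. 37 cross-layer wakeups out of 1200 total layer events */
	printf("xlayer_wake=%.2f%%\n", as_pct(37, 1200));
	return 0;
}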