scx_layered: stat reporting updates

Tejun Heo 2024-03-12 10:48:21 -10:00
parent a642fc873b
commit a9457a408e
3 changed files with 25 additions and 25 deletions

scheds/rust/scx_layered/src/bpf/intf.h

@@ -51,6 +51,7 @@ enum layer_stat_idx {
 	LSTAT_OPEN_IDLE,
 	LSTAT_AFFN_VIOL,
 	LSTAT_PREEMPT,
+	LSTAT_PREEMPT_FAIL,
 	LSTAT_EXCL_COLLISION,
 	LSTAT_EXCL_PREEMPT,
 	NR_LSTATS,

scheds/rust/scx_layered/src/bpf/main.bpf.c

@@ -524,8 +524,10 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 		preempt_cursor = (cpu + 1) % nr_possible_cpus;
 		lstat_inc(LSTAT_PREEMPT, layer, cctx);
-		break;
+		return;
 	}
+	lstat_inc(LSTAT_PREEMPT_FAIL, layer, cctx);
 }

 void BPF_STRUCT_OPS(layered_dispatch, s32 cpu, struct task_struct *prev)

scheds/rust/scx_layered/src/main.rs

@@ -1093,6 +1093,7 @@ struct OpenMetricsStats {
     l_min_exec_us: Family<Vec<(String, String)>, Gauge<i64, AtomicI64>>,
     l_open_idle: Family<Vec<(String, String)>, Gauge<f64, AtomicU64>>,
     l_preempt: Family<Vec<(String, String)>, Gauge<f64, AtomicU64>>,
+    l_preempt_fail: Family<Vec<(String, String)>, Gauge<f64, AtomicU64>>,
     l_affn_viol: Family<Vec<(String, String)>, Gauge<f64, AtomicU64>>,
     l_excl_collision: Family<Vec<(String, String)>, Gauge<f64, AtomicU64>>,
     l_excl_preempt: Family<Vec<(String, String)>, Gauge<f64, AtomicU64>>,
@@ -1175,6 +1176,10 @@ impl OpenMetricsStats {
             l_preempt,
             "% of scheduling events that preempted other tasks"
         );
+        register!(
+            l_preempt_fail,
+            "% of scheduling events that attempted to preempt other tasks but failed"
+        );
         register!(
             l_affn_viol,
             "% of scheduling events that violated configured policies due to CPU affinity restrictions"
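register! is a convenience macro defined earlier in main.rs, so its body is not part of this diff. As a rough sketch of what registering such a gauge family amounts to with the prometheus_client crate these types come from (the free function and its name are illustrative, not the scheduler's actual code):

use prometheus_client::metrics::family::Family;
use prometheus_client::metrics::gauge::Gauge;
use prometheus_client::registry::Registry;
use std::sync::atomic::AtomicU64;

// Illustrative sketch of roughly what register!(l_preempt_fail, "...") sets up:
// create a labeled gauge family and attach it to the OpenMetrics registry.
fn register_preempt_fail(
    registry: &mut Registry,
) -> Family<Vec<(String, String)>, Gauge<f64, AtomicU64>> {
    let l_preempt_fail = Family::<Vec<(String, String)>, Gauge<f64, AtomicU64>>::default();
    registry.register(
        "l_preempt_fail",
        "% of scheduling events that attempted to preempt other tasks but failed",
        l_preempt_fail.clone(),
    );
    l_preempt_fail
}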
@@ -1606,6 +1611,10 @@ impl<'a> Scheduler<'a> {
                 lstat_pct(bpf_intf::layer_stat_idx_LSTAT_OPEN_IDLE)
             );
             let l_preempt = set!(l_preempt, lstat_pct(bpf_intf::layer_stat_idx_LSTAT_PREEMPT));
+            let l_preempt_fail = set!(
+                l_preempt_fail,
+                lstat_pct(bpf_intf::layer_stat_idx_LSTAT_PREEMPT_FAIL)
+            );
             let l_affn_viol = set!(
                 l_affn_viol,
                 lstat_pct(bpf_intf::layer_stat_idx_LSTAT_AFFN_VIOL)
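lstat_pct and set! are helpers defined elsewhere in main.rs, so this hunk only wires the new counter into them. Going by the help text registered above, the value being computed is the share of the layer's scheduling events that hit a failed preemption attempt; a minimal sketch of that arithmetic, with the signature and the choice of denominator being assumptions rather than the scheduler's actual code:

// Assumed sketch: express one per-layer stat delta as a percentage of the
// layer's total scheduling events over the reporting interval.
fn lstat_pct(lstats: &[u64], idx: usize, total_events: u64) -> f64 {
    if total_events == 0 {
        return 0.0;
    }
    lstats[idx] as f64 / total_events as f64 * 100.0
}

The resulting percentage feeds both the periodic info! lines below and the corresponding OpenMetrics gauge.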
@@ -1633,15 +1642,23 @@ impl<'a> Scheduler<'a> {
                 width = header_width,
             );
             info!(
-                " {:<width$} tot={:7} local={} open_idle={} preempt={} affn_viol={}",
+                " {:<width$} tot={:7} local={} open_idle={} affn_viol={}",
                 "",
                 l_total.get(),
                 fmt_pct(l_local.get()),
                 fmt_pct(l_open_idle.get()),
-                fmt_pct(l_preempt.get()),
                 fmt_pct(l_affn_viol.get()),
                 width = header_width,
             );
+            info!(
+                " {:<width$} preempt/fail={}/{} min_exec={}/{:7.2}ms",
+                "",
+                fmt_pct(l_preempt.get()),
+                fmt_pct(l_preempt_fail.get()),
+                fmt_pct(l_min_exec.get()),
+                l_min_exec_us.get() as f64 / 1000.0,
+                width = header_width,
+            );
             info!(
                 " {:<width$} cpus={:3} [{:3},{:3}] {}",
                 "",
@@ -1651,27 +1668,6 @@ impl<'a> Scheduler<'a> {
                 format_bitvec(&layer.cpus),
                 width = header_width
             );
-            match &layer.kind {
-                LayerKind::Confined { min_exec_us, .. }
-                | LayerKind::Grouped { min_exec_us, .. }
-                | LayerKind::Open { min_exec_us, .. } => {
-                    if *min_exec_us > 0 {
-                        info!(
-                            " {:<width$} min_exec={} min_exec_ms={:7.2}",
-                            "",
-                            fmt_pct(l_min_exec.get()),
-                            l_min_exec_us.get() as f64 / 1000.0,
-                            width = header_width,
-                        );
-                    } else if l_min_exec.get() != 0.0 || l_min_exec_us.get() != 0 {
-                        warn!(
-                            "min_exec_us is off but min_exec={} min_exec_ms={:7.2}",
-                            fmt_pct(l_min_exec.get()),
-                            l_min_exec_us.get() as f64 / 1000.0,
-                        );
-                    }
-                }
-            }
             match &layer.kind {
                 LayerKind::Grouped { exclusive, .. } | LayerKind::Open { exclusive, .. } => {
                     if *exclusive {
@@ -1684,7 +1680,8 @@ impl<'a> Scheduler<'a> {
                         );
                     } else if l_excl_collision.get() != 0.0 || l_excl_preempt.get() != 0.0 {
                         warn!(
-                            "exclusive is off but excl_coll={} excl_preempt={}",
+                            "{}: exclusive is off but excl_coll={} excl_preempt={}",
+                            spec.name,
                             fmt_pct(l_excl_collision.get()),
                             fmt_pct(l_excl_preempt.get()),
                         );
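Not part of this commit, but for context on where the new gauge surfaces: once l_preempt_fail is registered with the prometheus_client Registry, the whole registry can be rendered in the OpenMetrics text exposition format. A minimal sketch, assuming the Registry built alongside OpenMetricsStats is at hand (the helper name is illustrative):

use prometheus_client::encoding::text::encode;
use prometheus_client::registry::Registry;

// Illustrative sketch: render every registered family, including the new
// l_preempt_fail gauge, as OpenMetrics text for scraping or logging.
fn dump_metrics(registry: &Registry) -> Result<String, std::fmt::Error> {
    let mut buf = String::new();
    encode(&mut buf, registry)?;
    Ok(buf)
}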