scx_nest: Apply r_impatient only if no core is found in primary nest

Julia pointed out that our current implementation of r_impatient is
incorrect. r_impatient is meant to be a mechanism for more aggressively
growing the primary nest when a task repeatedly fails to find an idle
core there. Right now, we trigger r_impatient if we're not able to find
the task's attached or previous core in the primary nest, but we
_should_ be triggering it only if we're unable to find _any_ core in
the primary nest. Fixing the implementation to do this drastically
decreases how aggressively we grow the primary nest when r_impatient is
in effect.

Reported-by: Julia Lawall <julia.lawall@inria.fr>
Signed-off-by: David Vernet <void@manifault.com>
commit ab0e36f9ce (parent 239d5d1d2c)
David Vernet <void@manifault.com>, 2023-12-18 11:00:58 -06:00
@@ -207,7 +207,7 @@ s32 BPF_STRUCT_OPS(nest_select_cpu, struct task_struct *p, s32 prev_cpu,
 	s32 cpu;
 	struct task_ctx *tctx;
 	struct pcpu_ctx *pcpu_ctx;
-	bool direct_to_primary = false;
+	bool direct_to_primary = false, reset_impatient = true;
 
 	tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
 	if (!tctx)
@@ -232,7 +232,6 @@ s32 BPF_STRUCT_OPS(nest_select_cpu, struct task_struct *p, s32 prev_cpu,
 	if (bpf_cpumask_test_cpu(tctx->attached_core, cast_mask(p_mask)) &&
 	    scx_bpf_test_and_clear_cpu_idle(tctx->attached_core)) {
 		cpu = tctx->attached_core;
-		tctx->prev_misses = 0;
 		stat_inc(NEST_STAT(WAKEUP_ATTACHED));
 		goto migrate_primary;
 	}
@@ -246,18 +245,10 @@ s32 BPF_STRUCT_OPS(nest_select_cpu, struct task_struct *p, s32 prev_cpu,
 	    bpf_cpumask_test_cpu(prev_cpu, cast_mask(p_mask)) &&
 	    scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
 		cpu = prev_cpu;
-		tctx->prev_misses = 0;
 		stat_inc(NEST_STAT(WAKEUP_PREV_PRIMARY));
 		goto migrate_primary;
 	}
 
-	if (r_impatient > 0 && ++tctx->prev_misses >= r_impatient) {
-		direct_to_primary = true;
-		tctx->prev_misses = 0;
-		stat_inc(NEST_STAT(TASK_IMPATIENT));
-		goto search_reserved;
-	}
-
 	if (find_fully_idle) {
 		/* Then try any fully idle core in primary. */
 		cpu = scx_bpf_pick_idle_cpu(cast_mask(p_mask),
@@ -275,7 +266,14 @@ s32 BPF_STRUCT_OPS(nest_select_cpu, struct task_struct *p, s32 prev_cpu,
 		goto migrate_primary;
 	}
 
-search_reserved:
+	if (r_impatient > 0 && ++tctx->prev_misses >= r_impatient) {
+		direct_to_primary = true;
+		tctx->prev_misses = 0;
+		stat_inc(NEST_STAT(TASK_IMPATIENT));
+	}
+
+	reset_impatient = false;
+
 	/* Then try any fully idle core in reserve. */
 	bpf_cpumask_and(p_mask, p->cpus_ptr, cast_mask(reserve));
 	if (find_fully_idle) {
@@ -336,6 +334,8 @@ search_reserved:
 promote_to_primary:
 	stat_inc(NEST_STAT(PROMOTED_TO_PRIMARY));
 migrate_primary:
+	if (reset_impatient)
+		tctx->prev_misses = 0;
 	pcpu_ctx = bpf_map_lookup_elem(&pcpu_ctxs, &cpu);
 	if (pcpu_ctx) {
 		if (pcpu_ctx->scheduled_compaction) {