scx_lavd: properly check for idle CPUs in pick_cpu()

We are not updating `is_idle` in every path where pick_cpu() finds an idle
CPU, causing unnecessary rescheduling events when select_cpu() is called.

To resolve this, ensure that `is_idle` is correctly set in every path of
pick_cpu() that finds an idle CPU. Additionally, always dispatch the task to
the local DSQ immediately upon finding (and reserving) an idle CPU.
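
For illustration only, here is a minimal, self-contained sketch of the pattern
this change enforces (this is not scx_lavd's code: the hypothetical
`sketch_select_cpu` op flattens what pick_cpu() and lavd_select_cpu() do in the
hunks below, and it assumes the scx repo's common.bpf.h header). The point is
that once an idle CPU has been reserved, the task should be queued on that
CPU's local DSQ right away:

```c
#include <scx/common.bpf.h>

char _license[] SEC("license") = "GPL";

/*
 * Illustrative sketch only; scx_lavd's real logic is split between
 * pick_cpu() and lavd_select_cpu() (see the diff below).
 */
s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p, s32 prev_cpu,
		   u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu = prev_cpu;

	/* Reserving prev_cpu atomically clears its idle bit. */
	if (scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
		is_idle = true;
	} else {
		/* Otherwise try to reserve any idle CPU the task may run on. */
		cpu = scx_bpf_pick_idle_cpu(p->cpus_ptr, 0);
		if (cpu >= 0)
			is_idle = true;
		else
			cpu = prev_cpu;
	}

	/*
	 * Consume the reservation immediately: dispatch to the chosen CPU's
	 * local DSQ so it wakes up with work to run, instead of leaving its
	 * idle bit cleared and triggering another scheduling pass later.
	 */
	if (is_idle)
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

	return cpu;
}
```

In scx_lavd the reservation happens inside pick_cpu(), which is why the fix
both propagates `is_idle` back to the caller and makes lavd_select_cpu()
dispatch via put_local_rq_no_fail() as soon as an idle CPU is reported.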

Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
Author: Andrea Righi <andrea.righi@canonical.com>
Date:   2024-06-18 17:30:15 +02:00
Parent: 819ffd527f
Commit: bad9ed13ef


@@ -2174,6 +2174,7 @@ static s32 pick_cpu(struct task_struct *p, struct task_ctx *taskc,
 	if (bpf_cpumask_test_cpu(prev_cpu, cast_mask(a_cpumask)) &&
 	    scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
 		cpu_id = prev_cpu;
+		*is_idle = true;
 		goto unlock_out;
 	}
 
@@ -2181,16 +2182,20 @@ static s32 pick_cpu(struct task_struct *p, struct task_ctx *taskc,
 	 * Next, pick a fully idle core among active CPUs.
 	 */
 	cpu_id = scx_bpf_pick_idle_cpu(cast_mask(a_cpumask), SCX_PICK_IDLE_CORE);
-	if (cpu_id >= 0)
+	if (cpu_id >= 0) {
+		*is_idle = true;
 		goto unlock_out;
+	}
 
 	/*
 	 * Then, pick an any idle core among active CPUs even if its hypertwin
 	 * is in use.
 	 */
 	cpu_id = scx_bpf_pick_idle_cpu(cast_mask(a_cpumask), 0);
-	if (cpu_id >= 0)
+	if (cpu_id >= 0) {
+		*is_idle = true;
 		goto unlock_out;
+	}
 
 	/*
 	 * Then, pick an any idle core among overflow CPUs.
@@ -2198,8 +2203,10 @@ static s32 pick_cpu(struct task_struct *p, struct task_ctx *taskc,
 	bpf_cpumask_and(o_cpumask, p->cpus_ptr, cast_mask(ovrflw));
 
 	cpu_id = scx_bpf_pick_idle_cpu(cast_mask(o_cpumask), 0);
-	if (cpu_id >= 0)
+	if (cpu_id >= 0) {
+		*is_idle = true;
 		goto unlock_out;
+	}
 
 	/*
 	 * Next, if there is no idle core under our control, pick random core
@@ -2257,8 +2264,10 @@ s32 BPF_STRUCT_OPS(lavd_select_cpu, struct task_struct *p, s32 prev_cpu,
 	 */
 	if (!is_wakeup_wf(wake_flags)) {
 		cpu_id = pick_cpu(p, taskc, prev_cpu, wake_flags, &found_idle);
-		if (found_idle)
+		if (found_idle) {
+			put_local_rq_no_fail(p, taskc, 0);
 			return cpu_id;
+		}
 
 		goto try_yield_out;
 	}