scx_lavd: kick CPU explicitly at the ops.enqueue() path

When the current task is chosen to yield, we should explicitly call
scx_bpf_kick_cpu(_, SCX_KICK_PREEMPT). Setting the current task's time
slice to zero is not sufficient in this case because the sched_ext core
does not call resched_curr() at the ops.enqueue() path.

Signed-off-by: Changwoo Min <changwoo@igalia.com>
Changwoo Min, 2024-10-28 14:11:53 +09:00
commit 5b91a525bb (parent f56b79b19c)
2 changed files with 28 additions and 23 deletions
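For illustration, below is a minimal sketch of the pattern this commit adopts at
the ops.enqueue() path. It assumes the scx BPF helpers from <scx/common.bpf.h>;
the policy helper should_preempt_current() is hypothetical, and this is not the
actual scx_lavd code. Zeroing the slice only takes effect where the core calls
resched_curr(), so the enqueue path pairs it with an explicit kick:

#include <scx/common.bpf.h>

/* Hypothetical policy: prefer the newcomer if it has a larger weight. */
static bool should_preempt_current(struct task_struct *curr,
				   struct task_struct *p)
{
	return p->scx.weight > curr->scx.weight;
}

void BPF_STRUCT_OPS(sketch_enqueue, struct task_struct *p, u64 enq_flags)
{
	struct task_struct *curr = bpf_get_current_task_btf();
	s32 cpu = bpf_get_smp_processor_id();

	/* Queue @p on the global DSQ with the default slice. */
	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);

	if (should_preempt_current(curr, p)) {
		/*
		 * Not sufficient on its own at this path: the sched_ext
		 * core does not call resched_curr() from ops.enqueue().
		 */
		curr->scx.slice = 0;
		/* So force a reschedule on this CPU explicitly. */
		scx_bpf_kick_cpu(cpu, SCX_KICK_PREEMPT);
	}
}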


@@ -1020,7 +1020,7 @@ void BPF_STRUCT_OPS(lavd_enqueue, struct task_struct *p, u64 enq_flags)
struct task_ctx *taskc;
s32 cpu_id;
u64 dsq_id;
bool preempted = false;
bool preempted = false, yield;
/*
* Place a task to a run queue of current cpu's compute domain.
@@ -1089,8 +1089,11 @@ void BPF_STRUCT_OPS(lavd_enqueue, struct task_struct *p, u64 enq_flags)
p_run = bpf_get_current_task_btf();
taskc_run = try_get_task_ctx(p_run);
if (taskc_run && !is_eligible(taskc_run))
try_yield_current_cpu(p_run, cpuc_cur, taskc_run);
if (taskc_run && !is_eligible(taskc_run)) {
yield = try_yield_current_cpu(p_run, cpuc_cur, taskc_run);
if (yield)
try_kick_cpu(cpuc_cur, cpuc_cur->last_kick_clk);
}
}
}
@@ -1376,7 +1379,7 @@ void BPF_STRUCT_OPS(lavd_tick, struct task_struct *p_run)
* If a task is eligible, don't consider its being preempted.
*/
if (is_eligible(p_run))
return;
goto update_cpuperf;
/*
* Try to yield the current CPU if there is a higher priority task in
@@ -1385,16 +1388,24 @@ void BPF_STRUCT_OPS(lavd_tick, struct task_struct *p_run)
cpuc_run = get_cpu_ctx();
taskc_run = get_task_ctx(p_run);
if (!cpuc_run || !taskc_run)
return;
goto update_cpuperf;
preempted = try_yield_current_cpu(p_run, cpuc_run, taskc_run);
/*
* If decided to yield, give up its time slice.
*/
if (preempted) {
p_run->scx.slice = 0;
}
/*
* Update performance target of the current CPU if the current running
* task continues to run.
*/
if (!preempted)
else {
update_cpuperf:
try_decrease_cpuperf_target(cpuc_run);
}
}
void BPF_STRUCT_OPS(lavd_runnable, struct task_struct *p, u64 enq_flags)


@@ -215,9 +215,8 @@ null_out:
static bool try_kick_cpu(struct cpu_ctx *victim_cpuc, u64 victim_last_kick_clk)
{
/*
* If the current CPU is a victim, we just reset the current task's
* time slice as an optimization. Othewise, kick the remote CPU for
* preemption.
* Kick a victim CPU if it is not victimized yet by another
* concurrent kick task.
*
* Kicking the victim CPU does _not_ guarantee that task @p will run on
* that CPU. Enqueuing @p to the global queue is one operation, and
@@ -225,19 +224,11 @@ static bool try_kick_cpu(struct cpu_ctx *victim_cpuc, u64 victim_last_kick_clk)
* okay because, anyway, the victim CPU will run a higher-priority task
* than @p.
*/
if (bpf_get_smp_processor_id() == victim_cpuc->cpu_id) {
struct task_struct *tsk = bpf_get_current_task_btf();
tsk->scx.slice = 0;
return true;
}
bool ret;
/*
* Kick the remote victim CPU if it is not victimized yet by another
* concurrent kick task.
*/
bool ret = __sync_bool_compare_and_swap(&victim_cpuc->last_kick_clk,
victim_last_kick_clk,
bpf_ktime_get_ns());
ret = __sync_bool_compare_and_swap(&victim_cpuc->last_kick_clk,
victim_last_kick_clk,
bpf_ktime_get_ns());
if (ret)
scx_bpf_kick_cpu(victim_cpuc->cpu_id, SCX_KICK_PREEMPT);
@@ -329,8 +320,6 @@ static bool try_yield_current_cpu(struct task_struct *p_run,
ret = __sync_bool_compare_and_swap(
&taskc_wait->victim_cpu,
(s32)LAVD_CPU_ID_NONE, cpu_id);
if (ret)
ret = try_kick_cpu(cpuc_run, cpuc_run->last_kick_clk);
}
/*
@@ -340,6 +329,11 @@ static bool try_yield_current_cpu(struct task_struct *p_run,
}
bpf_rcu_read_unlock();
/*
* If decided to yield (ret == true), the caller should give up
* its time slice (at the ops.tick() path) or explicitly kick the
* victim CPU (at the ops.enqueue() path).
*/
return ret;
}
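Taken together, the two call sites touched by this commit follow the contract
described in the comment above; condensed (not verbatim), they look roughly
like this:

	/*
	 * ops.tick() path: the core re-evaluates the slice at the tick,
	 * so giving up the slice is enough to get the task preempted.
	 */
	preempted = try_yield_current_cpu(p_run, cpuc_run, taskc_run);
	if (preempted)
		p_run->scx.slice = 0;

	/*
	 * ops.enqueue() path: resched_curr() is not called by the core,
	 * so the yield decision must be followed by an explicit kick.
	 */
	yield = try_yield_current_cpu(p_run, cpuc_cur, taskc_run);
	if (yield)
		try_kick_cpu(cpuc_cur, cpuc_cur->last_kick_clk);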