scx_rustland_core: get rid of the SCX_ENQ_WAKEUP logic

With user-space scheduling we don't usually dispatch a task immediately
after selecting an idle CPU, so there is little benefit in trying to
optimize the WAKE_SYNC scenario (a task waking up another task and then
releasing the CPU) when picking an idle CPU.
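
For context, the place where this optimization does pay off is an
in-kernel scx scheduler that dispatches directly from ops.select_cpu().
Below is a minimal sketch (not scx_rustland_core code) using the
sched_ext kfunc scx_bpf_select_cpu_dfl(), whose built-in idle-CPU
picker already implements the WAKE_SYNC heuristics:

#include <scx/common.bpf.h>

s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
		   s32 prev_cpu, u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu;

	/*
	 * The default idle picker honors WAKE_SYNC (e.g., it tries to
	 * place the wakee near the waker that is about to sleep).
	 */
	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	if (is_idle)
		/*
		 * Dispatching right here is what makes WAKE_SYNC
		 * worthwhile: the wakee starts running on the selected
		 * CPU immediately.
		 */
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

	return cpu;
}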

Therefore, get rid of the WAKE_SYNC logic in select_cpu() and rely on
the user-space logic (which has access to the WAKE_SYNC information) to
handle this particular case.
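
As a minimal sketch of the user-space counterpart, assuming the BPF
side forwards the raw enqueue flags together with each queued task
(the struct queued_task_ctx layout and the helper below are
illustrative, not the actual scx_rustland_core API):

#include <stdbool.h>
#include <stdint.h>

/*
 * Matches ENQUEUE_WAKEUP in the kernel at the time of writing; real
 * code should take the value from the sched_ext headers.
 */
#define SCX_ENQ_WAKEUP	1ULL

/* Illustrative layout only, not the actual scx_rustland_core ABI. */
struct queued_task_ctx {
	int32_t		pid;
	int32_t		cpu;	/* CPU the task last ran on */
	uint64_t	flags;	/* raw enq_flags from the BPF side */
};

/*
 * Hypothetical user-space placement hook: on a wakeup, prefer the
 * waker's CPU when it is idle, reproducing the heuristic removed from
 * the BPF side; otherwise stay on the previous CPU.
 */
static int32_t pick_task_cpu(const struct queued_task_ctx *task,
			     int32_t waker_cpu, bool waker_cpu_idle)
{
	if ((task->flags & SCX_ENQ_WAKEUP) && waker_cpu_idle)
		return waker_cpu;
	return task->cpu;
}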

Signed-off-by: Andrea Righi <andrea.righi@linux.dev>
Author: Andrea Righi <andrea.righi@linux.dev>
Date: 2024-10-16 11:23:50 +02:00
Parent: 67ec1af5cf
Commit: 704fe95f51


@@ -423,7 +423,7 @@ static inline u64 task_slice(struct task_struct *p)
  * to handle these mistakes in favor of a more efficient response and a reduced
  * scheduling overhead.
  */
-static s32 pick_idle_cpu(struct task_struct *p, s32 prev_cpu, u64 enq_flags)
+static s32 pick_idle_cpu(struct task_struct *p, s32 prev_cpu)
 {
 	const struct cpumask *online_cpumask, *idle_smtmask, *idle_cpumask;
 	struct bpf_cpumask *l2_domain, *l3_domain;
@@ -511,51 +511,6 @@ static s32 pick_idle_cpu(struct task_struct *p, s32 prev_cpu, u64 enq_flags)
 	 */
 	bpf_cpumask_and(l3_mask, p->cpus_ptr, cast_mask(l3_domain));
 
-	if (enq_flags & SCX_ENQ_WAKEUP) {
-		struct task_struct *current = (void *)bpf_get_current_task_btf();
-		struct bpf_cpumask *curr_l3_domain;
-		bool share_llc, has_idle;
-
-		/*
-		 * Determine waker CPU scheduling domain.
-		 */
-		cpu = bpf_get_smp_processor_id();
-		cctx = try_lookup_cpu_ctx(cpu);
-		if (!cctx) {
-			cpu = -ENOENT;
-			goto out_put_cpumask;
-		}
-
-		curr_l3_domain = cctx->l3_cpumask;
-		if (!curr_l3_domain) {
-			scx_bpf_error("CPU LLC cpumask not initialized");
-			cpu = -ENOENT;
-			goto out_put_cpumask;
-		}
-
-		/*
-		 * If both the waker and wakee share the same LLC keep using
-		 * the same CPU if possible.
-		 */
-		share_llc = bpf_cpumask_test_cpu(prev_cpu, cast_mask(curr_l3_domain));
-		if (share_llc && scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
-			cpu = prev_cpu;
-			goto out_put_cpumask;
-		}
-
-		/*
-		 * If the waker's domain is not saturated attempt to migrate
-		 * the wakee on the same CPU as the waker.
-		 */
-		has_idle = bpf_cpumask_intersects(cast_mask(curr_l3_domain), idle_cpumask);
-		if (has_idle &&
-		    bpf_cpumask_test_cpu(cpu, p->cpus_ptr) &&
-		    !(current->flags & PF_EXITING) &&
-		    scx_bpf_dsq_nr_queued(cpu_to_dsq(cpu)) == 0)
-			goto out_put_cpumask;
-	}
-
 	/*
 	 * Find the best idle CPU, prioritizing full idle cores in SMT systems.
 	 */
@@ -737,7 +692,7 @@ static void dispatch_task(const struct dispatched_task_ctx *task)
 		goto out_release;
 
 out_kick_idle_cpu:
-	cpu = pick_idle_cpu(p, task->cpu, task->flags);
+	cpu = pick_idle_cpu(p, task->cpu);
 	if (cpu >= 0)
 		scx_bpf_kick_cpu(cpu, 0);
@@ -770,7 +725,7 @@ int rs_select_cpu(struct task_cpu_arg *input)
 		return -EINVAL;
 
 	bpf_rcu_read_lock();
-	cpu = pick_idle_cpu(p, input->cpu, input->flags);
+	cpu = pick_idle_cpu(p, input->cpu);
 	bpf_rcu_read_unlock();
 	bpf_task_release(p);