Mirror of https://github.com/sched-ext/scx.git, synced 2024-11-22 02:41:49 +00:00
scx_lavd: add missing reset_lock_futex_boost()
reset_lock_futex_boost() should be called at every context switch of a task. Otherwise, in the worst case, a task could keep a stale boost, and that task and its CPU could block preemption. To avoid such a situation, add the missing reset_lock_futex_boost() calls.

Signed-off-by: Changwoo Min <changwoo@igalia.com>
parent b01ff79080
commit 8d63024be7
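To see the failure mode concretely, here is a minimal user-space model of the boost logic (a sketch only: task_ctx is reduced to the two counters, and is_lock_holder() is assumed to test them, since its definition is not part of this diff):

        #include <stdbool.h>
        #include <stdio.h>

        /* Reduced model of scx_lavd's task_ctx: only the boost counters. */
        struct task_ctx {
                short lock_boost;       /* lock boost count */
                short futex_boost;      /* futex boost count */
        };

        /* Assumption: a task counts as a lock holder while either boost is set. */
        static bool is_lock_holder(const struct task_ctx *t)
        {
                return t->lock_boost > 0 || t->futex_boost > 0;
        }

        static void reset_lock_futex_boost(struct task_ctx *t)
        {
                t->lock_boost = 0;
                t->futex_boost = 0;
        }

        int main(void)
        {
                struct task_ctx t = { .lock_boost = 1, .futex_boost = 0 };

                /* Without a reset at every context switch, the task would stay
                 * a "lock holder" indefinitely and keep winning the latency
                 * boost, which is how it could block preemption on its CPU. */
                printf("holder before reset: %d\n", is_lock_holder(&t));  /* 1 */
                reset_lock_futex_boost(&t);     /* the call this commit adds */
                printf("holder after reset:  %d\n", is_lock_holder(&t));  /* 0 */
                return 0;
        }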
@@ -132,9 +132,10 @@ struct task_ctx {
        u32     lat_cri_waker;  /* waker's latency criticality */
        volatile s32    victim_cpu;
        u16     slice_boost_prio; /* how many times a task fully consumed the slice */
-       u8      wakeup_ft;      /* regular wakeup = 1, sync wakeup = 2 */
        volatile s16    lock_boost;     /* lock boost count */
        volatile s16    futex_boost;    /* futex boost count */
+       volatile u8     need_lock_boost; /* need to boost lock for deadline calculation */
+       u8      wakeup_ft;      /* regular wakeup = 1, sync wakeup = 2 */
        volatile u32    *futex_uaddr;   /* futex uaddr */

 /*
@@ -176,7 +177,7 @@ enum {
 };

 enum {
-       LAVD_MSG_TASKC = 0x1,
+       LAVD_MSG_TASKC = 0x1
 };

 struct introspec {
@@ -93,11 +93,17 @@ static void dec_futex_boost(u32 *uaddr)
        try_dec_futex_boost(taskc, cpuc, uaddr);
 }

-static void reset_lock_futex_boost(struct task_ctx *taskc)
+static void reset_lock_futex_boost(struct task_ctx *taskc, struct cpu_ctx *cpuc)
 {
+       if (is_lock_holder(taskc)) {
+               taskc->need_lock_boost = true;
+               cpuc->nr_lhp++;
+       }
+
        taskc->lock_boost = 0;
        taskc->futex_boost = 0;
        taskc->futex_uaddr = NULL;
+       cpuc->lock_holder = false;
 }

 /**
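The reworked reset_lock_futex_boost() now does double duty: besides clearing the counters, it latches the task's lock-holder status into need_lock_boost and accounts it in cpuc->nr_lhp, so the deadline path can consume the boost exactly once. A condensed, runnable model of that handshake (assumptions: both structs are reduced to the fields used here, and 500 stands in for the real permille factor):

        #include <stdbool.h>
        #include <stdio.h>

        struct task_ctx { bool need_lock_boost; short lock_boost, futex_boost; };
        struct cpu_ctx  { unsigned nr_lhp; bool lock_holder; };        /* reduced */

        /* Producer: runs at the context switch (per the hunk above). */
        static void reset_lock_futex_boost(struct task_ctx *t, struct cpu_ctx *c)
        {
                if (t->lock_boost > 0 || t->futex_boost > 0) {  /* is_lock_holder() */
                        t->need_lock_boost = true;      /* latch: boost me once */
                        c->nr_lhp++;                    /* lock-holder stat */
                }
                t->lock_boost = t->futex_boost = 0;
                c->lock_holder = false;
        }

        /* Consumer: the deadline calculation drains the latch exactly once. */
        static unsigned long apply_boost(struct task_ctx *t, unsigned long lat_cri)
        {
                if (t->need_lock_boost) {
                        t->need_lock_boost = false;
                        lat_cri += (lat_cri * 500) / 1000;      /* placeholder factor */
                }
                return lat_cri;
        }

        int main(void)
        {
                struct task_ctx t = { .lock_boost = 2 };
                struct cpu_ctx c = { 0 };

                reset_lock_futex_boost(&t, &c);
                printf("%lu\n", apply_boost(&t, 1000)); /* 1500: boosted once */
                printf("%lu\n", apply_boost(&t, 1000)); /* 1000: latch drained */
                return 0;
        }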
@@ -307,22 +307,18 @@ static u64 calc_lat_cri(struct task_struct *p, struct task_ctx *taskc,
        if (is_kernel_task(p))
                lat_cri += LAVD_LC_KTHREAD_FT;

+       /*
+        * Reset task's lock and futex boost count
+        * for a lock holder to be boosted only once.
+        */
+       reset_lock_futex_boost(taskc, cpuc_cur);
+
        /*
         * Prioritize a lock holder for faster system-wide forward progress.
         */
-       if (is_lock_holder(taskc)) {
+       if (taskc->need_lock_boost) {
+               taskc->need_lock_boost = false;
                lat_cri += (lat_cri * LAVD_LC_LOCK_HOLDER_FT) / 1000;
-
-               /*
-                * Update statistics.
-                */
-               cpuc_cur->nr_lhp++;
-
-               /*
-                * Reset task's lock and futex boost count
-                * for a lock holder to be boosted only once.
-                */
-               reset_lock_futex_boost(taskc);
        }

        /*
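The boost itself is integer permille scaling: lat_cri grows by LAVD_LC_LOCK_HOLDER_FT/1000 of its current value. For example, with a placeholder factor of 500 (the real constant is defined elsewhere in scx_lavd), a lat_cri of 2000 becomes 3000:

        #include <stdio.h>

        #define LAVD_LC_LOCK_HOLDER_FT  500     /* placeholder, not the real value */

        int main(void)
        {
                unsigned long long lat_cri = 2000;

                /* Same permille scaling as the hunk above: +FT/1000 of the
                 * current value, computed without floating point. */
                lat_cri += (lat_cri * LAVD_LC_LOCK_HOLDER_FT) / 1000;
                printf("boosted lat_cri = %llu\n", lat_cri);    /* 3000 */
                return 0;
        }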
@@ -363,7 +359,9 @@ static u64 calc_task_load_actual(struct task_ctx *taskc)
         * can be calculated from task's average run time and frequency.
         */
        const s64 interval_adj = LAVD_TIME_ONE_SEC / LAVD_SYS_STAT_INTERVAL_NS;
-       return (taskc->run_time_ns * taskc->run_freq) / interval_adj;
+       u64 load = (taskc->run_time_ns * taskc->run_freq) / interval_adj;
+
+       return min(load, LAVD_TIME_ONE_SEC);
 }

 static u64 clamp_time_slice_ns(u64 slice)
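The new min() caps a task's computed load at LAVD_TIME_ONE_SEC, i.e. at most one second of CPU time per second; previously a large run_time_ns x run_freq product could overstate a single task's load. A worked example (the interval constant is a placeholder, and min() is spelled out since the kernel helper is not available in user space):

        #include <stdio.h>

        #define LAVD_TIME_ONE_SEC               1000000000ULL   /* 1 s in ns */
        #define LAVD_SYS_STAT_INTERVAL_NS       250000000ULL    /* placeholder: 250 ms */

        static unsigned long long min_u64(unsigned long long a, unsigned long long b)
        {
                return a < b ? a : b;
        }

        int main(void)
        {
                const unsigned long long interval_adj =
                        LAVD_TIME_ONE_SEC / LAVD_SYS_STAT_INTERVAL_NS;  /* 4 */

                /* avg run time 30 ms, run frequency 40: fits under the cap */
                unsigned long long load = (30000000ULL * 40) / interval_adj;
                printf("load = %llu\n", min_u64(load, LAVD_TIME_ONE_SEC)); /* 300000000 */

                /* 60 ms x 100 would claim 1.5 s of CPU per second; clamped */
                load = (60000000ULL * 100) / interval_adj;
                printf("load = %llu\n", min_u64(load, LAVD_TIME_ONE_SEC)); /* 1000000000 */
                return 0;
        }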
@@ -629,6 +627,12 @@ static void update_stat_for_stopping(struct task_struct *p,
         */
        if (READ_ONCE(cur_svc_time) < taskc->svc_time)
                WRITE_ONCE(cur_svc_time, taskc->svc_time);
+
+       /*
+        * Reset task's lock and futex boost count
+        * for a lock holder to be boosted only once.
+        */
+       reset_lock_futex_boost(taskc, cpuc);
 }

 static void update_stat_for_quiescent(struct task_struct *p,
@@ -641,6 +645,12 @@ static void update_stat_for_quiescent(struct task_struct *p,
         */
        cpuc->load_actual -= taskc->load_actual;
        cpuc->load_run_time_ns -= clamp_time_slice_ns(taskc->run_time_ns);
+
+       /*
+        * Reset task's lock and futex boost count
+        * for a lock holder to be boosted only once.
+        */
+       reset_lock_futex_boost(taskc, cpuc);
 }

 static bool match_task_core_type(struct task_ctx *taskc,
@@ -1174,8 +1184,7 @@ void BPF_STRUCT_OPS(lavd_dispatch, s32 cpu, struct task_struct *prev)
        u64 now = bpf_ktime_get_ns();
        struct cpu_ctx *cpuc;
        struct bpf_cpumask *active, *ovrflw;
-       struct task_struct *p;
-       struct task_ctx *taskc;
+       struct task_struct *p, *taskc;
        u64 dsq_id = 0;
        bool try_consume = false;

@@ -1289,20 +1298,27 @@ consume_out:
         */
        if (!try_consume)
                return;

        if (consume_task(cpu, cpuc, now))
                return;

        /*
-        * If no other task is consumed, the scheduler will keep continue to
-        * run the prev task, so let's re-assigne its time slice.
+        * Reset prev task's lock and futex boost count
+        * for a lock holder to be boosted only once.
         */
-       if (prev && (prev->scx.flags & SCX_TASK_QUEUED)) {
+       if (prev) {
                taskc = get_task_ctx(prev);
                if (!taskc) {
                        scx_bpf_error("Failed to look up task context");
                        return;
                }
-               prev->scx.slice = calc_time_slice(prev, taskc);
+
+               reset_lock_futex_boost(taskc, cpuc);
+
+               /*
+                * If nothing to run, continue to run the previous task.
+                */
+               if (prev->scx.flags & SCX_TASK_QUEUED)
+                       prev->scx.slice = calc_time_slice(prev, taskc);
        }
 }
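Note the widened guard in this hunk: the boost reset now runs for any valid prev, in line with the commit message's requirement to reset at every context switch, while refreshing prev's time slice stays gated on SCX_TASK_QUEUED, i.e. on prev actually being runnable again.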
@@ -51,9 +51,6 @@ rawtracepoint:sched_switch
 }

 interval:s:1 {
-       $scx_ops = kaddr("scx_ops");
-       $ops = (struct sched_ext_ops*)$scx_ops;
-       printf("scheduler: %s\n", $ops->name);
        print(@avg_lat);
        print(@usec_hist);
        print(@dsq_lat);