scx_lavd: incorporate a task's weight into latency criticality

When calculating a task's latency criticality, incorporate the task's
weight into the runtime, wake_freq, and wait_freq factors more
systematically. The result is cleaner and behaves better under heavy load.

Signed-off-by: Changwoo Min <changwoo@igalia.com>
Changwoo Min 2024-10-22 01:24:29 +09:00
parent 47dd1b9582
commit 07ed821511
2 changed files with 10 additions and 27 deletions
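
For illustration only, below is a minimal user-space sketch (not the scheduler code) of how the reworked factor functions respond to weight. sigmoid_approx()/rsigmoid_approx(), the constants, and the sample inputs are simplified assumptions standing in for the scheduler's sigmoid_u64()/rsigmoid_u64(); the point is just that a larger weight inflates the frequency factor (ft * weight) and shrinks the runtime penalty (ft / weight), so higher-weight tasks end up with higher latency criticality.

/*
 * Minimal user-space sketch, NOT the scheduler code: sigmoid_approx()
 * and rsigmoid_approx() are crude stand-ins for the scheduler's
 * sigmoid_u64()/rsigmoid_u64(), and the constants and inputs are
 * made-up values chosen only to show the direction of the effect.
 */
#include <stdio.h>
#include <stdint.h>

#define FREQ_MAX        1000000ULL      /* mirrors LAVD_LC_FREQ_MAX */
#define RUNTIME_MAX     1000000000ULL   /* assumed: ~1 sec in ns */

/* saturating ramps standing in for the real sigmoid helpers */
static uint64_t sigmoid_approx(uint64_t v, uint64_t max)
{
        return v > max ? max : v;
}

static uint64_t rsigmoid_approx(uint64_t v, uint64_t max)
{
        return v > max ? 0 : max - v;
}

/* same shape as the new calc_freq_factor(): boost grows with weight */
static uint64_t calc_freq_factor(uint64_t freq, uint64_t weight)
{
        uint64_t ft = sigmoid_approx(freq, FREQ_MAX);
        return (ft * weight) + 1;
}

/* same shape as the new calc_runtime_factor(): penalty shrinks with weight */
static uint64_t calc_runtime_factor(uint64_t runtime, uint64_t weight)
{
        uint64_t ft = rsigmoid_approx(runtime, RUNTIME_MAX);
        return (ft / weight) + 1;
}

int main(void)
{
        uint64_t wake_freq = 5000, run_time_ns = 200000;
        uint64_t weight;

        /* p->scx.weight is roughly 100 for a nice-0 task */
        for (weight = 50; weight <= 200; weight *= 2)
                printf("weight=%3llu freq_ft=%llu runtime_ft=%llu\n",
                       (unsigned long long)weight,
                       (unsigned long long)calc_freq_factor(wake_freq, weight),
                       (unsigned long long)calc_runtime_factor(run_time_ns, weight));
        return 0;
}

Because both factors now scale with the weight, which already encodes the nice value, the separate static-priority term becomes redundant, which appears to be the rationale for dropping calc_static_prio_factor() below.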


@@ -27,7 +27,6 @@ enum consts_internal {
 	LAVD_LC_FREQ_MAX = 1000000,
 	LAVD_LC_RUNTIME_MAX = LAVD_TIME_ONE_SEC,
-	LAVD_LC_RUNTIME_SHIFT = 15,
 	LAVD_LC_WAKEUP_FT = 30,
 	LAVD_LC_KTHREAD_FT = LAVD_LC_WAKEUP_FT,
 	LAVD_LC_LOCK_HOLDER_FT = 300, /* 30% boost */


@@ -235,26 +235,16 @@ out:
 	return ratio;
 }
 
-static u64 calc_runtime_factor(u64 runtime)
+static u64 calc_runtime_factor(u64 runtime, u64 weight)
 {
 	u64 ft = rsigmoid_u64(runtime, LAVD_LC_RUNTIME_MAX);
-	return (ft >> LAVD_LC_RUNTIME_SHIFT) + 1;
+	return (ft / weight) + 1;
 }
 
-static u64 calc_freq_factor(u64 freq)
+static u64 calc_freq_factor(u64 freq, u64 weight)
 {
 	u64 ft = sigmoid_u64(freq, LAVD_LC_FREQ_MAX);
-	return ft + 1;
-}
-
-static s64 calc_static_prio_factor(struct task_struct *p)
-{
-	/*
-	 * A nicer task with >20 static priority will get penalized with
-	 * negative latency-criticality. However, a greedier task with <20
-	 * static priority will get boosted.
-	 */
-	return (20 - get_nice_prio(p)) >> 1;
+	return (ft * weight) + 1;
 }
 
 static u64 calc_lat_cri(struct task_struct *p, struct task_ctx *taskc,
@@ -273,9 +263,9 @@ static u64 calc_lat_cri(struct task_struct *p, struct task_ctx *taskc,
 	 * is monotonically increasing since higher frequencies mean more
 	 * latency-critical.
 	 */
-	wait_freq_ft = calc_freq_factor(taskc->wait_freq);
-	wake_freq_ft = calc_freq_factor(taskc->wake_freq);
-	runtime_ft = calc_runtime_factor(taskc->run_time_ns);
+	wait_freq_ft = calc_freq_factor(taskc->wait_freq, p->scx.weight);
+	wake_freq_ft = calc_freq_factor(taskc->wake_freq, p->scx.weight);
+	runtime_ft = calc_runtime_factor(taskc->run_time_ns, p->scx.weight);
 
 	/*
 	 * Wake frequency and wait frequency represent how much a task is used
@@ -289,11 +279,6 @@ static u64 calc_lat_cri(struct task_struct *p, struct task_ctx *taskc,
 	lat_cri += log2_u64(wait_freq_ft + 1);
 	lat_cri += log2_u64(wake_freq_ft + 1);
 
-	/*
-	 * A user-provided nice value is a strong hint for latency-criticality.
-	 */
-	lat_cri += calc_static_prio_factor(p);
-
 	/*
 	 * Prioritize a wake-up task since this is a clear sign of immediate
 	 * consumer. If it is a synchronous wakeup, double the prioritization.
@@ -522,12 +507,11 @@ static void update_stat_for_running(struct task_struct *p,
 	 * We use the log-ed value since the raw value follows the highly
 	 * skewed distribution.
 	 */
-	wait_freq_ft = calc_freq_factor(taskc->wait_freq);
-	wake_freq_ft = calc_freq_factor(taskc->wake_freq);
+	wait_freq_ft = calc_freq_factor(taskc->wait_freq, p->scx.weight);
+	wake_freq_ft = calc_freq_factor(taskc->wake_freq, p->scx.weight);
 	perf_cri = log2_u64(wait_freq_ft * wake_freq_ft);
 	perf_cri += log2_u64(max(taskc->run_freq, 1) *
-			     max(taskc->run_time_ns, 1));
-	perf_cri += calc_static_prio_factor(p);
+			     max(taskc->run_time_ns, 1) * p->scx.weight);
 	perf_cri += taskc->wakeup_ft * LAVD_LC_WAKEUP_FT;
 	taskc->wakeup_ft = 0;