scx_lavd: change the greedy penalty function

We used to penalize a task's latency linearly in its greedy ratio.
However, this gave the greedy ratio too much weight in determining
the virtual deadline, especially among under-utilized tasks (< 100.0%).
Now we treat all under-utilized tasks as having the same greedy ratio
(= 100.0%). For over-utilized tasks, we apply a milder penalty
to avoid sudden latency spikes.

Signed-off-by: Changwoo Min <changwoo@igalia.com>
Author: Changwoo Min <changwoo@igalia.com>
Date:   2024-10-23 21:33:40 +09:00
Parent: 9acf950b75
Commit: 731a7871d7

2 changed files with 25 additions and 7 deletions
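Reading the diff below in concrete terms (the greedy ratio is in 0.1% units, so 1000 = 100.0%): under the old linear scheme, a task consuming 200% of its fair share scaled its deadline by 2000, twice the neutral factor of 1000, while a task at 50% scaled it by 500, half of neutral. Under the new scheme, every under-utilized task maps to the flat factor 1000, and the 200% task maps to 1000 + (2000 - 1000) / 20 = 1050, i.e. a deadline only 5% longer than neutral.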

--- a/scheds/rust/scx_lavd/src/bpf/intf.h
+++ b/scheds/rust/scx_lavd/src/bpf/intf.h

@@ -28,6 +28,7 @@ enum consts_internal {
 	LAVD_LC_FREQ_MAX		= 1000000,
 	LAVD_LC_RUNTIME_MAX		= LAVD_TIME_ONE_SEC,
 	LAVD_LC_WEIGHT_BOOST		= 128, /* 2^7 */
+	LAVD_LC_GREEDY_PENALTY		= 20, /* 20% */
 	LAVD_SLICE_BOOST_MAX_FT		= 3, /* maximum additional 3x of slice */
 	LAVD_SLICE_BOOST_MAX_STEP	= 6, /* 6 slice exhausitions in a row */
@@ -40,14 +41,14 @@ enum consts_internal {
 	LAVD_CPU_ID_NONE		= ((u32)-1),
 	LAVD_SYS_STAT_INTERVAL_NS	= (50ULL * NSEC_PER_MSEC),
-	LAVD_SYS_STAT_DECAY_TIMES	= (2ULL * LAVD_TIME_ONE_SEC) / LAVD_SYS_STAT_INTERVAL_NS,
+	LAVD_SYS_STAT_DECAY_TIMES	= ((2ULL * LAVD_TIME_ONE_SEC) / LAVD_SYS_STAT_INTERVAL_NS),
 	LAVD_CC_PER_CORE_MAX_CTUIL	= 500, /* maximum per-core CPU utilization */
 	LAVD_CC_PER_TURBO_CORE_MAX_CTUIL = 750, /* maximum per-core CPU utilization for a turbo core */
 	LAVD_CC_NR_ACTIVE_MIN		= 1, /* num of mininum active cores */
 	LAVD_CC_NR_OVRFLW		= 1, /* num of overflow cores */
 	LAVD_CC_CPU_PIN_INTERVAL	= (1ULL * LAVD_TIME_ONE_SEC),
-	LAVD_CC_CPU_PIN_INTERVAL_DIV	= (LAVD_CC_CPU_PIN_INTERVAL /
-					   LAVD_SYS_STAT_INTERVAL_NS),
+	LAVD_CC_CPU_PIN_INTERVAL_DIV	= (LAVD_CC_CPU_PIN_INTERVAL / LAVD_SYS_STAT_INTERVAL_NS),
 	LAVD_AP_HIGH_UTIL		= 700, /* balanced mode when 10% < cpu util <= 40%,
 						  performance mode when cpu util > 40% */

--- a/scheds/rust/scx_lavd/src/bpf/main.bpf.c
+++ b/scheds/rust/scx_lavd/src/bpf/main.bpf.c

@@ -235,6 +235,21 @@ out:
 	return ratio;
 }
 
+static u32 calc_greedy_factor(u32 greedy_ratio)
+{
+	/*
+	 * For all under-utilized tasks, we treat them equally.
+	 */
+	if (greedy_ratio <= 1000)
+		return 1000;
+
+	/*
+	 * For over-utilized tasks, we give some mild penalty.
+	 */
+	return 1000 + ((greedy_ratio - 1000) / LAVD_LC_GREEDY_PENALTY);
+}
+
 static u64 calc_runtime_factor(u64 runtime, u64 weight_ft)
 {
 	u64 ft = rsigmoid_u64(runtime, LAVD_LC_RUNTIME_MAX);
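As a quick sanity check of the new mapping, the function from the hunk above can be exercised in a standalone userspace program. The typedef, enum, and main() here are illustrative scaffolding; only calc_greedy_factor() (comments elided) and LAVD_LC_GREEDY_PENALTY come from the diff:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint32_t u32;

	enum { LAVD_LC_GREEDY_PENALTY = 20 }; /* 20%, as defined in intf.h above */

	/* From the hunk above; ratios are in 0.1% units (1000 == 100.0%). */
	static u32 calc_greedy_factor(u32 greedy_ratio)
	{
		if (greedy_ratio <= 1000)
			return 1000;
		return 1000 + ((greedy_ratio - 1000) / LAVD_LC_GREEDY_PENALTY);
	}

	int main(void)
	{
		assert(calc_greedy_factor(500)  == 1000); /* 50% used: treated as 100% */
		assert(calc_greedy_factor(1000) == 1000); /* exactly 100%: neutral */
		assert(calc_greedy_factor(1500) == 1025); /* 150% used: +2.5% deadline */
		assert(calc_greedy_factor(2000) == 1050); /* 200% used: +5.0% deadline */
		printf("greedy factor checks passed\n");
		return 0;
	}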
@@ -343,14 +358,17 @@ static void calc_virtual_deadline_delta(struct task_struct *p,
 					     struct cpu_ctx *cpuc_cur,
 					     u64 enq_flags)
 {
-	u64 deadline, lat_cri, greedy_ratio;
+	u64 deadline, lat_cri;
+	u32 greedy_ratio, greedy_ft;
 
 	/*
 	 * Calculate the deadline based on latency criticality and greedy ratio.
 	 */
 	lat_cri = calc_lat_cri(p, taskc, cpuc_cur, enq_flags);
 	greedy_ratio = calc_greedy_ratio(taskc);
-	deadline = (taskc->run_time_ns / lat_cri) * greedy_ratio;
+	greedy_ft = calc_greedy_factor(greedy_ratio);
+
+	deadline = (taskc->run_time_ns / lat_cri) * greedy_ft;
 
 	taskc->vdeadline_delta_ns = deadline;
 }
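To see the effect on vdeadline_delta_ns, here is a small sketch with made-up inputs; run_time_ns and lat_cri are hypothetical values standing in for what the task context and calc_lat_cri() would supply:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;

	int main(void)
	{
		u64 run_time_ns = 4000000;	/* hypothetical: 4 ms average runtime */
		u64 lat_cri = 40;		/* hypothetical latency criticality */

		/* Any under-utilized task (ratio <= 1000) now gets the flat
		 * factor 1000, so mild greed no longer perturbs its deadline. */
		u64 dl_under = (run_time_ns / lat_cri) * 1000;

		/* An over-utilized task at 200% (ratio 2000) used to have its
		 * deadline doubled (factor 2000 vs. the neutral 1000); with the
		 * new factor of 1050 it is stretched by only 5%. */
		u64 dl_old = (run_time_ns / lat_cri) * 2000;
		u64 dl_new = (run_time_ns / lat_cri) * 1050;

		printf("under=%llu old_over=%llu new_over=%llu\n",
		       (unsigned long long)dl_under,
		       (unsigned long long)dl_old,
		       (unsigned long long)dl_new);
		return 0;
	}

Since a larger delta pushes the task's virtual deadline further out, the factor now deprioritizes only genuinely over-utilized tasks, and does so gently.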
@@ -570,7 +588,7 @@ static void update_stat_for_stopping(struct task_struct *p,
 					   struct cpu_ctx *cpuc)
 {
 	u64 now = bpf_ktime_get_ns();
-	u64 old_run_time_ns, suspended_duration, task_run_time;
+	u64 suspended_duration, task_run_time;
 
 	/*
 	 * Update task's run_time. When a task is scheduled consecutively
@@ -581,7 +599,6 @@ static void update_stat_for_stopping(struct task_struct *p,
 	 * consecutive execution is accumulated and reflected in the
 	 * calculation of runtime statistics.
 	 */
-	old_run_time_ns = taskc->run_time_ns;
 	suspended_duration = get_suspended_duration_and_reset(cpuc);
 	task_run_time = now - taskc->last_running_clk - suspended_duration;
 	taskc->acc_run_time_ns += task_run_time;
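The accounting that survives this cleanup can be modeled in userspace as follows; the field names mirror the diff, while the helper name, clock values, and suspension duration are stand-ins for what bpf_ktime_get_ns() and get_suspended_duration_and_reset() would supply in the scheduler:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;

	struct task_ctx {
		u64 last_running_clk;	/* when the task last started running */
		u64 acc_run_time_ns;	/* runtime accumulated across consecutive runs */
	};

	/* Model of the update: wall-clock time since the task started running,
	 * minus any time spent suspended, is charged to the task. */
	static void charge_runtime(struct task_ctx *taskc, u64 now,
				   u64 suspended_duration)
	{
		u64 task_run_time = now - taskc->last_running_clk - suspended_duration;

		taskc->acc_run_time_ns += task_run_time;
	}

	int main(void)
	{
		struct task_ctx taskc = { .last_running_clk = 1000, .acc_run_time_ns = 0 };

		/* Ran from t=1000 to t=6000 with 2000 ns suspended: 3000 ns charged. */
		charge_runtime(&taskc, 6000, 2000);
		printf("acc_run_time_ns=%llu\n", (unsigned long long)taskc.acc_run_time_ns);
		return 0;
	}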