From c38e749c366c46e0638def1645f363652c486031 Mon Sep 17 00:00:00 2001 From: Changwoo Min Date: Fri, 2 Aug 2024 14:47:05 +0900 Subject: [PATCH] scx_lavd: improve the equation for calculating ineligibility duration This commit includes a few changes: - treat a new forked task more conservatively - defer the execution of more tasks for a longer time using ineligibility duration - consider if a task is woken up in calculating ineligibility duration --- scheds/rust/scx_lavd/src/bpf/intf.h | 8 +++--- scheds/rust/scx_lavd/src/bpf/main.bpf.c | 34 ++++++++++++++----------- scheds/rust/scx_lavd/src/main.rs | 4 +-- 3 files changed, 25 insertions(+), 21 deletions(-) diff --git a/scheds/rust/scx_lavd/src/bpf/intf.h b/scheds/rust/scx_lavd/src/bpf/intf.h index 0105356..b7e5023 100644 --- a/scheds/rust/scx_lavd/src/bpf/intf.h +++ b/scheds/rust/scx_lavd/src/bpf/intf.h @@ -69,9 +69,10 @@ enum consts { LAVD_SLICE_BOOST_MAX_FT = 3, /* maximum additional 3x of slice */ LAVD_SLICE_BOOST_MAX_STEP = 6, /* 6 slice exhausitions in a row */ - LAVD_GREEDY_RATIO_NEW = 2000, + LAVD_NEW_PROC_PENALITY = 5, + LAVD_GREEDY_RATIO_NEW = (1000 * LAVD_NEW_PROC_PENALITY), - LAVD_ELIGIBLE_TIME_MAX = (1ULL * LAVD_TIME_ONE_SEC), + LAVD_ELIGIBLE_TIME_MAX = (9999ULL * LAVD_TIME_ONE_SEC), LAVD_CPU_UTIL_MAX = 1000, /* 100.0% */ LAVD_CPU_UTIL_MAX_FOR_CPUPERF = 850, /* 85.0% */ @@ -213,7 +214,6 @@ struct task_ctx { u64 slice_ns; /* time slice */ u32 greedy_ratio; /* task's overscheduling ratio compared to its nice priority */ u32 lat_cri; /* calculated latency criticality */ - u32 starv_cri; /* calculated starvation criticality */ volatile s32 victim_cpu; u16 slice_boost_prio; /* how many times a task fully consumed the slice */ @@ -231,7 +231,7 @@ struct task_ctx_x { u32 cpu_id; /* where a task ran */ u64 cpu_util; /* cpu utilization in [0..100] */ u32 avg_perf_cri; /* average performance criticality */ - u32 avg_lat_cri; /* average latency criticality */ + u32 thr_lat_cri; /* threshold for latency criticality */ 
u32 nr_active; /* number of active cores */ u32 cpuperf_cur; /* CPU's current performance target */ }; diff --git a/scheds/rust/scx_lavd/src/bpf/main.bpf.c b/scheds/rust/scx_lavd/src/bpf/main.bpf.c index 3942b01..5697908 100644 --- a/scheds/rust/scx_lavd/src/bpf/main.bpf.c +++ b/scheds/rust/scx_lavd/src/bpf/main.bpf.c @@ -434,7 +434,7 @@ int submit_task_ctx(struct task_struct *p, struct task_ctx *taskc, u32 cpu_id) m->taskc_x.static_prio = get_nice_prio(p); m->taskc_x.cpu_util = cpuc->util / 10; m->taskc_x.cpu_id = cpu_id; - m->taskc_x.avg_lat_cri = stat_cur->avg_lat_cri; + m->taskc_x.thr_lat_cri = stat_cur->thr_lat_cri; m->taskc_x.avg_perf_cri = stat_cur->avg_perf_cri; m->taskc_x.nr_active = stat_cur->nr_active; m->taskc_x.cpuperf_cur = cpuc->cpuperf_cur; @@ -939,7 +939,7 @@ static bool is_wakeup_ef(u64 enq_flags) return !!(enq_flags & SCX_ENQ_WAKEUP); } -static u64 calc_eligible_delta(struct task_ctx *taskc) +static u64 calc_eligible_delta(struct task_ctx *taskc, u64 enq_flags) { /* * We calculate how long a task should be ineligible for execution. To @@ -961,6 +961,7 @@ static u64 calc_eligible_delta(struct task_ctx *taskc) */ struct sys_stat *stat_cur = get_sys_stat_cur(); u64 delta_ns, lat_cri_ft; + bool is_wakeup; /* * Get how greedy this task has been to enforce fairness if necessary. @@ -977,26 +978,28 @@ static u64 calc_eligible_delta(struct task_ctx *taskc) goto out; } + is_wakeup = is_wakeup_ef(enq_flags); /* * Calculate ineligible duration based on greedy ratio, run_freq, and - * lat_cri. + * lat_cri. Prioritize wake-up tasks. */ delta_ns = (LAVD_TIME_ONE_SEC / (1000 * (taskc->run_freq + 1))) * taskc->greedy_ratio; + lat_cri_ft = taskc->lat_cri + (is_wakeup * (LAVD_LC_WAKEUP_FT >> 1)); - if (stat_cur->avg_lat_cri < taskc->lat_cri) { + if (have_scheduled(taskc) && stat_cur->thr_lat_cri < lat_cri_ft) { /* - * Prioritize above-average latency-critical tasks. + * Prioritize far above-average latency-critical tasks. 
*/ - lat_cri_ft = taskc->lat_cri - stat_cur->avg_lat_cri + 1; + lat_cri_ft = lat_cri_ft - stat_cur->thr_lat_cri + 1; delta_ns /= lat_cri_ft; } else { /* * Deprioritize below-average latency-critical tasks. */ - lat_cri_ft = stat_cur->avg_lat_cri - taskc->lat_cri + 1; + lat_cri_ft = stat_cur->thr_lat_cri - lat_cri_ft + 1; delta_ns *= lat_cri_ft; } @@ -1074,22 +1077,21 @@ static void calc_lat_cri(struct task_struct *p, struct task_ctx *taskc) taskc->lat_cri = lat_cri; } -static void calc_starv_cri(struct task_ctx *taskc, bool is_wakeup) +static u64 calc_starv_cri(struct task_ctx *taskc, bool is_wakeup) { - taskc->starv_cri = calc_starvation_factor(taskc) + - (is_wakeup * LAVD_LC_WAKEUP_FT); + return calc_starvation_factor(taskc) + (is_wakeup * LAVD_LC_WAKEUP_FT); } static void calc_virtual_deadline_delta(struct task_struct *p, struct task_ctx *taskc, u64 enq_flags) { bool is_wakeup; + u64 sc; is_wakeup = is_wakeup_ef(enq_flags); calc_lat_cri(p, taskc); - calc_starv_cri(taskc, is_wakeup); - taskc->vdeadline_delta_ns = taskc->run_time_ns / (taskc->lat_cri + - taskc->starv_cri); + sc = calc_starv_cri(taskc, is_wakeup); + taskc->vdeadline_delta_ns = taskc->run_time_ns / (taskc->lat_cri + sc); } static u64 calc_task_load_actual(struct task_ctx *taskc) @@ -1377,7 +1379,7 @@ static void calc_when_to_run(struct task_struct *p, struct task_ctx *taskc, * overscheduled - eligible_time_ns. 
*/ calc_virtual_deadline_delta(p, taskc, enq_flags); - calc_eligible_delta(taskc); + calc_eligible_delta(taskc, enq_flags); /* * Update the logical clock of the virtual deadline including @@ -1699,7 +1701,7 @@ static bool is_lat_cri_task(struct task_ctx *taskc) { struct sys_stat *stat_cur = get_sys_stat_cur(); - return taskc->lat_cri > stat_cur->avg_lat_cri; + return taskc->lat_cri > stat_cur->thr_lat_cri; } static void put_global_rq(struct task_struct *p, struct task_ctx *taskc, @@ -2531,6 +2533,7 @@ void BPF_STRUCT_OPS(lavd_update_idle, s32 cpu, bool idle) static void init_task_ctx(struct task_struct *p, struct task_ctx *taskc) { + struct sys_stat *stat_cur = get_sys_stat_cur(); u64 now = bpf_ktime_get_ns(); memset(taskc, 0, sizeof(*taskc)); @@ -2538,6 +2541,7 @@ static void init_task_ctx(struct task_struct *p, struct task_ctx *taskc) taskc->last_stopping_clk = now; /* for run_time_ns */ taskc->run_time_ns = LAVD_SLICE_MAX_NS; taskc->victim_cpu = (s32)LAVD_CPU_ID_NONE; + taskc->svc_time = stat_cur->avg_svc_time * LAVD_NEW_PROC_PENALITY; } void BPF_STRUCT_OPS(lavd_enable, struct task_struct *p) diff --git a/scheds/rust/scx_lavd/src/main.rs b/scheds/rust/scx_lavd/src/main.rs index c2ca2b9..b6c8964 100644 --- a/scheds/rust/scx_lavd/src/main.rs +++ b/scheds/rust/scx_lavd/src/main.rs @@ -344,7 +344,7 @@ impl<'a> Scheduler<'a> { "slc_ns", "grdy_rt", "lat_cri", - "avg_lc", + "thr_lc", "st_prio", "slc_bst", "run_freq", @@ -387,7 +387,7 @@ impl<'a> Scheduler<'a> { tc.slice_ns, tc.greedy_ratio, tc.lat_cri, - tx.avg_lat_cri, + tx.thr_lat_cri, tx.static_prio, tc.slice_boost_prio, tc.run_freq,