scx_lavd: rename avg_perf_cri to thr_perf_cri

As a preparation to improve the performance criticality logic, we first
rename "avg_perf_cri" to "thr_perf_cri" since the average is no longer
used as the threshold.

Signed-off-by: Changwoo Min <changwoo@igalia.com>
This commit is contained in:
Changwoo Min 2024-09-27 13:12:20 +09:00
parent aea431c0c6
commit f07023e42b
4 changed files with 16 additions and 16 deletions

View File

@@ -120,7 +120,7 @@ struct sys_stat {
volatile u32 max_lat_cri; /* maximum latency criticality (LC) */ volatile u32 max_lat_cri; /* maximum latency criticality (LC) */
volatile u32 thr_lat_cri; /* latency criticality threshold for kicking */ volatile u32 thr_lat_cri; /* latency criticality threshold for kicking */
volatile u32 avg_perf_cri; /* average performance criticality */ volatile u32 thr_perf_cri; /* performance criticality threshold */
volatile u32 nr_violation; /* number of utilization violation */ volatile u32 nr_violation; /* number of utilization violation */
volatile u32 nr_active; /* number of active cores */ volatile u32 nr_active; /* number of active cores */
@@ -289,7 +289,7 @@ struct task_ctx_x {
u16 static_prio; /* nice priority */ u16 static_prio; /* nice priority */
u32 cpu_id; /* where a task ran */ u32 cpu_id; /* where a task ran */
u64 cpu_util; /* cpu utilization in [0..100] */ u64 cpu_util; /* cpu utilization in [0..100] */
u32 avg_perf_cri; /* average performance criticality */ u32 thr_perf_cri; /* performance criticality threshold */
u32 avg_lat_cri; /* average latency criticality */ u32 avg_lat_cri; /* average latency criticality */
u32 nr_active; /* number of active cores */ u32 nr_active; /* number of active cores */
u32 cpuperf_cur; /* CPU's current performance target */ u32 cpuperf_cur; /* CPU's current performance target */

View File

@@ -438,7 +438,7 @@ static bool is_lat_cri(struct task_ctx *taskc, struct sys_stat *stat_cur)
static bool is_perf_cri(struct task_ctx *taskc, struct sys_stat *stat_cur) static bool is_perf_cri(struct task_ctx *taskc, struct sys_stat *stat_cur)
{ {
if (READ_ONCE(taskc->on_big) && READ_ONCE(taskc->on_little)) if (READ_ONCE(taskc->on_big) && READ_ONCE(taskc->on_little))
return taskc->perf_cri >= stat_cur->avg_perf_cri; return taskc->perf_cri >= stat_cur->thr_perf_cri;
return READ_ONCE(taskc->on_big); return READ_ONCE(taskc->on_big);
} }
@@ -475,7 +475,7 @@ int submit_task_ctx(struct task_struct *p, struct task_ctx *taskc, u32 cpu_id)
m->taskc_x.cpu_util = cpuc->util / 10; m->taskc_x.cpu_util = cpuc->util / 10;
m->taskc_x.cpu_id = cpu_id; m->taskc_x.cpu_id = cpu_id;
m->taskc_x.avg_lat_cri = stat_cur->avg_lat_cri; m->taskc_x.avg_lat_cri = stat_cur->avg_lat_cri;
m->taskc_x.avg_perf_cri = stat_cur->avg_perf_cri; m->taskc_x.thr_perf_cri = stat_cur->thr_perf_cri;
m->taskc_x.nr_active = stat_cur->nr_active; m->taskc_x.nr_active = stat_cur->nr_active;
m->taskc_x.cpuperf_cur = cpuc->cpuperf_cur; m->taskc_x.cpuperf_cur = cpuc->cpuperf_cur;
@@ -613,7 +613,7 @@ struct sys_stat_ctx {
u32 nr_pc_on_big; u32 nr_pc_on_big;
u32 nr_lc_on_big; u32 nr_lc_on_big;
u64 sum_perf_cri; u64 sum_perf_cri;
u32 avg_perf_cri; u32 thr_perf_cri;
u64 new_util; u64 new_util;
u32 nr_violation; u32 nr_violation;
}; };
@@ -761,11 +761,11 @@ static void calc_sys_stat(struct sys_stat_ctx *c)
*/ */
c->max_lat_cri = c->stat_cur->max_lat_cri; c->max_lat_cri = c->stat_cur->max_lat_cri;
c->avg_lat_cri = c->stat_cur->avg_lat_cri; c->avg_lat_cri = c->stat_cur->avg_lat_cri;
c->avg_perf_cri = c->stat_cur->avg_perf_cri; c->thr_perf_cri = c->stat_cur->thr_perf_cri;
} }
else { else {
c->avg_lat_cri = c->sum_lat_cri / c->nr_sched; c->avg_lat_cri = c->sum_lat_cri / c->nr_sched;
c->avg_perf_cri = c->sum_perf_cri / c->nr_sched; c->thr_perf_cri = c->sum_perf_cri / c->nr_sched;
} }
} }
@@ -791,8 +791,8 @@ static void update_sys_stat_next(struct sys_stat_ctx *c)
calc_avg32(stat_cur->avg_lat_cri, c->avg_lat_cri); calc_avg32(stat_cur->avg_lat_cri, c->avg_lat_cri);
stat_next->thr_lat_cri = stat_next->max_lat_cri - stat_next->thr_lat_cri = stat_next->max_lat_cri -
((stat_next->max_lat_cri - stat_next->avg_lat_cri) >> 1); ((stat_next->max_lat_cri - stat_next->avg_lat_cri) >> 1);
stat_next->avg_perf_cri = stat_next->thr_perf_cri =
calc_avg32(stat_cur->avg_perf_cri, c->avg_perf_cri); calc_avg32(stat_cur->thr_perf_cri, c->thr_perf_cri);
stat_next->nr_violation = stat_next->nr_violation =
calc_avg32(stat_cur->nr_violation, c->nr_violation); calc_avg32(stat_cur->nr_violation, c->nr_violation);
@@ -2554,7 +2554,7 @@ static int calc_cpuperf_target(struct sys_stat *stat_cur,
* current CPU utilization (cpuc->util) and 2) the current task's * current CPU utilization (cpuc->util) and 2) the current task's
* performance criticality (taskc->perf_cri) compared to the * performance criticality (taskc->perf_cri) compared to the
* system-wide average performance criticality * system-wide average performance criticality
* (stat_cur->avg_perf_cri). * (stat_cur->thr_perf_cri).
* *
* When a current CPU utilization is 85% and the current task's * When a current CPU utilization is 85% and the current task's
* performance criticality is the same as the system-wide average * performance criticality is the same as the system-wide average
@@ -2567,7 +2567,7 @@ static int calc_cpuperf_target(struct sys_stat *stat_cur,
* high when a non-performance-critical task is running (i.e., * high when a non-performance-critical task is running (i.e.,
* deboosting CPU frequency). * deboosting CPU frequency).
*/ */
max_load = stat_cur->avg_perf_cri * LAVD_CPU_UTIL_MAX_FOR_CPUPERF; max_load = stat_cur->thr_perf_cri * LAVD_CPU_UTIL_MAX_FOR_CPUPERF;
cpu_load = taskc->perf_cri * cpuc->util; cpu_load = taskc->perf_cri * cpuc->util;
cpuperf_target = (cpu_load * SCX_CPUPERF_ONE) / max_load; cpuperf_target = (cpu_load * SCX_CPUPERF_ONE) / max_load;
cpuperf_target = min(cpuperf_target, SCX_CPUPERF_ONE); cpuperf_target = min(cpuperf_target, SCX_CPUPERF_ONE);

View File

@@ -637,7 +637,7 @@ impl<'a> Scheduler<'a> {
wait_freq: tc.wait_freq, wait_freq: tc.wait_freq,
wake_freq: tc.wake_freq, wake_freq: tc.wake_freq,
perf_cri: tc.perf_cri, perf_cri: tc.perf_cri,
avg_perf_cri: tx.avg_perf_cri, thr_perf_cri: tx.thr_perf_cri,
cpuperf_cur: tx.cpuperf_cur, cpuperf_cur: tx.cpuperf_cur,
cpu_util: tx.cpu_util, cpu_util: tx.cpu_util,
nr_active: tx.nr_active, nr_active: tx.nr_active,

View File

@@ -165,8 +165,8 @@ pub struct SchedSample {
pub wake_freq: u64, pub wake_freq: u64,
#[stat(desc = "Performance criticality of this task")] #[stat(desc = "Performance criticality of this task")]
pub perf_cri: u32, pub perf_cri: u32,
#[stat(desc = "Average performance criticality in a system")] #[stat(desc = "Performance criticality threshold")]
pub avg_perf_cri: u32, pub thr_perf_cri: u32,
#[stat(desc = "Target performance level of this CPU")] #[stat(desc = "Target performance level of this CPU")]
pub cpuperf_cur: u32, pub cpuperf_cur: u32,
#[stat(desc = "CPU utilization of this particular CPU")] #[stat(desc = "CPU utilization of this particular CPU")]
@@ -205,7 +205,7 @@ impl SchedSample {
"WAIT_FREQ", "WAIT_FREQ",
"WAKE_FREQ", "WAKE_FREQ",
"PERF_CRI", "PERF_CRI",
"AVG_PC", "THR_PC",
"CPUFREQ", "CPUFREQ",
"CPU_UTIL", "CPU_UTIL",
"NR_ACT", "NR_ACT",
@@ -246,7 +246,7 @@ impl SchedSample {
self.wait_freq, self.wait_freq,
self.wake_freq, self.wake_freq,
self.perf_cri, self.perf_cri,
self.avg_perf_cri, self.thr_perf_cri,
self.cpuperf_cur, self.cpuperf_cur,
self.cpu_util, self.cpu_util,
self.nr_active, self.nr_active,