Merge pull request #921 from hodgesds/layered-formatting-fix

scx_layered: Fix formatting
Daniel Hodges 2024-11-12 18:26:50 +00:00 committed by GitHub
commit 76310497bb


@@ -1269,7 +1269,7 @@ struct {
 *
 * Return: runnable_at delay, if any exists, in seconds.
 */
int get_delay_sec(struct task_struct *p, u64 jiffies_now)
{
	u64 runnable_at, delta_secs;
	runnable_at = READ_ONCE(p->scx.runnable_at);
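For reference, the delay this helper reports is just the jiffies delta between now and when the task last became runnable, scaled down to whole seconds. A minimal sketch of that computation, assuming the kernel tick rate is visible to the BPF program as a CONFIG_HZ-style kconfig value (the exact symbol scx_layered uses may differ):

/* sketch only: assumes CONFIG_HZ is exposed via libbpf's __kconfig mechanism */
extern unsigned int CONFIG_HZ __kconfig;

static u64 delay_secs(u64 runnable_at, u64 jiffies_now)
{
	/* no delay if runnable_at is not in the past (ordering/wrap) */
	if (jiffies_now <= runnable_at)
		return 0;

	/* jiffies delta divided by ticks-per-second gives whole seconds */
	return (jiffies_now - runnable_at) / CONFIG_HZ;
}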
@@ -1287,9 +1287,9 @@ int get_delay_sec(struct task_struct *p, u64 jiffies_now)
 * antistall_consume() - consume delayed DSQ
 * @cpu: cpu number
 * @cctx: cpu context
 *
 * This function consumes a delayed DSQ. This is meant to be called
 * from dispatch, before any other logic which could result in a
 * DSQ being consumed.
 *
 * This is meant to prevent issues such as DSQs with affinitized tasks
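The comment pins down the calling convention: the antistall path must run at the top of the dispatch callback, before any ordinary DSQ consumption. A rough sketch of that ordering, where lookup_cpu_ctx() is a placeholder for however the dispatch path obtains its struct cpu_ctx:

void BPF_STRUCT_OPS(layered_dispatch, s32 cpu, struct task_struct *prev)
{
	/* placeholder helper; the real code fetches the CPU context its own way */
	struct cpu_ctx *cctx = lookup_cpu_ctx(-1);

	if (!cctx)
		return;

	/* give a stalled, affinitized DSQ first claim on this CPU ... */
	if (antistall_consume(cpu, cctx))
		return;

	/* ... then fall through to the normal layer/fallback dispatch logic */
}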
@@ -1304,8 +1304,8 @@ bool antistall_consume(s32 cpu, struct cpu_ctx *cctx)
	u32 zero;
	bool consumed;
	struct task_struct *p;

	cur_delay = 0;
	consumed = false;
	zero = 0;
@@ -1314,9 +1314,9 @@ bool antistall_consume(s32 cpu, struct cpu_ctx *cctx)
	if (!cctx || !cctx->layer_idx || !cpu)
		return false;

	antistall_dsq = bpf_map_lookup_elem(&antistall_cpu_dsq, &zero);

	if (!antistall_dsq) {
		scx_bpf_error("cant happen");
		return false;
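antistall_cpu_dsq (and antistall_cpu_max_delay, used further down) is read with a fixed zero key, the usual pattern for a single-slot per-CPU array. A sketch of how such a map is typically declared and accessed, assuming BPF_MAP_TYPE_PERCPU_ARRAY with one u64 entry per CPU:

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, 1);
} antistall_cpu_dsq SEC(".maps");

u32 zero = 0;

/* this CPU's slot, as antistall_consume() does */
u64 *local = bpf_map_lookup_elem(&antistall_cpu_dsq, &zero);

/* another CPU's slot, as antistall_set() and layered_init do */
u64 *remote = bpf_map_lookup_percpu_elem(&antistall_cpu_dsq, &zero, cpu);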
@@ -1324,12 +1324,12 @@ bool antistall_consume(s32 cpu, struct cpu_ctx *cctx)
	if (*antistall_dsq == SCX_DSQ_INVALID)
		return false;

	consumed = scx_bpf_consume(*antistall_dsq);

	if (!consumed)
		goto reset;

	jiffies_now = bpf_jiffies64();

	bpf_for_each(scx_dsq, p, *antistall_dsq, 0) {
@@ -1337,10 +1337,10 @@ bool antistall_consume(s32 cpu, struct cpu_ctx *cctx)
		if (cur_delay > antistall_sec)
			return consumed;

		goto reset;
	}

reset:
	trace("antistall reset DSQ[%llu] SELECTED_CPU[%llu] DELAY[%llu]", *antistall_dsq, cpu, cur_delay);
	*antistall_dsq = SCX_DSQ_INVALID;
@@ -1574,7 +1574,7 @@ void BPF_STRUCT_OPS(layered_dispatch, s32 cpu, struct task_struct *prev)
	if (antistall_consume(cpu, cctx))
		return;

	/*
	 * if @prev was on SCX and is still runnable, we are here because @prev
	 * has exhausted its slice. We may want to keep running it on this CPU
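The comment above alludes to the usual sched_ext pattern for keeping a still-runnable @prev on the CPU after it has exhausted its slice: rather than consuming another DSQ, the scheduler can simply refill prev's slice. A hedged sketch of that idea, with keep_running() and slice_ns as placeholders for whatever policy check and slice length the scheduler actually uses:

	if (prev && (prev->scx.flags & SCX_TASK_QUEUED) &&
	    keep_running(cctx, prev)) {		/* placeholder policy check */
		/* let @prev run another full slice on this CPU */
		prev->scx.slice = slice_ns;
		return;
	}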
@@ -2390,7 +2390,7 @@ static bool layered_monitor(void)
 * If it cannot find such a CPU to flag, it will try to flag a CPU flagged to
 * process another with a lesser delay if one exists.
 */
u64 antistall_set(u64 dsq_id, u64 jiffies_now)
{
	struct task_struct *p;
	struct task_ctx *tctx;
@@ -2398,36 +2398,36 @@ u64 antistall_set(u64 dsq_id, u64 jiffies_now)
	u64 *antistall_dsq, *delay, cur_delay;
	bool first_pass;
	u32 zero;

	zero = 0;

	if (!dsq_id || !jiffies_now)
		return 0;

	// verifier
	bpf_rcu_read_lock();

	bpf_for_each(scx_dsq, p, dsq_id, 0) {
		if (!(tctx = lookup_task_ctx(p)))
			goto unlock;

		cur_delay = get_delay_sec(p, jiffies_now);
		if (cur_delay <= antistall_sec)
			// check head task in dsq
			goto unlock;

		first_pass = true;

look_for_cpu:
		bpf_for(cpu, 0, nr_possible_cpus) {
			if (!tctx->layered_cpumask)
				goto unlock;

			if (!bpf_cpumask_test_cpu(cpu, cast_mask(tctx->layered_cpumask)))
				continue;

			antistall_dsq = bpf_map_lookup_percpu_elem(&antistall_cpu_dsq, &zero, cpu);
			delay = bpf_map_lookup_percpu_elem(&antistall_cpu_max_delay, &zero, cpu);

			if (!antistall_dsq || !delay) {
				scx_bpf_error("cant happen");
				goto unlock;
@@ -2442,15 +2442,15 @@ look_for_cpu:
			if (first_pass)
				continue;

			if (*delay < cur_delay) {
				trace("antistall set DSQ[%llu] SELECTED_CPU[%llu] DELAY[%llu]", dsq_id, cpu, cur_delay);
				*delay = cur_delay;
				*antistall_dsq = dsq_id;
				goto unlock;
			}
		}

		if (first_pass) {
			first_pass = false;
			goto look_for_cpu;
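Stripped of the map plumbing, the two-pass selection described in the antistall_set() comment works roughly as below: the first pass only claims CPUs not yet flagged for any DSQ, and the second pass may displace a CPU whose flagged DSQ has waited less than the current one. The helpers here are placeholders for illustration, not functions from the scheduler:

	first_pass = true;
retry:
	bpf_for(cpu, 0, nr_possible_cpus) {
		if (!task_allowed_on(p, cpu))		/* placeholder for the cpumask test */
			continue;

		if (!cpu_is_flagged(cpu)) {		/* placeholder: slot still SCX_DSQ_INVALID */
			flag_cpu(cpu, dsq_id, cur_delay);
			return;
		}

		if (first_pass)
			continue;

		/* second pass: steal a CPU flagged for a less-delayed DSQ */
		if (flagged_delay(cpu) < cur_delay) {
			flag_cpu(cpu, dsq_id, cur_delay);
			return;
		}
	}

	if (first_pass) {
		first_pass = false;
		goto retry;
	}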
@@ -2481,7 +2481,7 @@ static bool antistall_scan(void)
		return true;

	jiffies_now = bpf_jiffies64();

	bpf_for(dsq_id, 0, nr_layers) {
		antistall_set(dsq_id, jiffies_now);
	}
@@ -2490,11 +2490,11 @@ static bool antistall_scan(void)
		dsq_id = cpu_hi_fallback_dsq_id(cpu);
		antistall_set(dsq_id, jiffies_now);
	}

	antistall_set(LO_FALLBACK_DSQ, jiffies_now);

	antistall_set(HI_FALLBACK_DSQ_BASE, jiffies_now);

	return true;
}
@@ -2549,7 +2549,7 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(layered_init)
	nr_online_cpus = 0;
	bpf_for(i, 0, nr_possible_cpus) {
		const volatile u8 *u8_ptr;

		init_antistall_dsq = bpf_map_lookup_percpu_elem(&antistall_cpu_dsq, &zero, i);
		if (init_antistall_dsq) {
			*init_antistall_dsq = SCX_DSQ_INVALID;