Revert "Merge pull request #746 from likewhatevs/layered-delay"

This reverts commit 2077b9a799, reversing
changes made to eb73005d07.
Pat Somaru 2024-10-08 22:01:05 -04:00
parent e1232b5efc
commit c90144d761
GPG Key ID: 1C2CAA6553D55978


@@ -1255,84 +1255,82 @@ void BPF_STRUCT_OPS(layered_dispatch, s32 cpu, struct task_struct *prev)
 	if (!disable_topology && dsq_iter_algo == DSQ_ITER_ROUND_ROBIN)
 		cpu_ctx_layer_idx_inc(cctx);
 
-	bpf_for(idx, 0, nr_layers) {
-		layer_idx = iter_layer_dsq_ctx(idx, cctx->layer_idx);
-		bool preempt = MEMBER_VPTR(layers, [layer_idx].preempt);
-		bool open = MEMBER_VPTR(layers, [layer_idx].open);
-		u32 layer_cpus = *MEMBER_VPTR(layers[layer_idx], .nr_cpus);
-		struct cpumask *layer_cpumask;
-		bool have_layer_cpumask = layer_cpumask = lookup_layer_cpumask(idx);
-		bool cpumask_test = false;
-
-		if (have_layer_cpumask)
-			cpumask_test = bpf_cpumask_test_cpu(cpu, layer_cpumask);
-
-		bool layer_matches = (have_layer_cpumask &&
-				      (cpumask_test ||
-				       (cpu <= nr_possible_cpus &&
-					cpu == fallback_cpu &&
-					layer_cpus == 0)));
-
-		if (disable_topology) {
-			/* consume preempting layers first */
-			if (preempt && scx_bpf_consume(idx))
-				return;
-
-			/* consume matching layers */
-			if (have_layer_cpumask)
-			{
-				if (cpumask_test ||
-				    (cpu == fallback_cpu && layer_cpus == 0)) {
-					if (scx_bpf_consume(idx))
-						return;
-				}
-			}
-
-			/* consume !preempting open layers */
-			if (!layers[idx].preempt && layers[idx].open &&
-			    scx_bpf_consume(idx))
-				return;
-		} else {
-			u64 matching_dsq = -1;
-			u64 non_preempting_open_dsq;
-
-			bpf_for(llc_id, 0, nr_llcs) {
-				dsq_id = layer_dsq_id(layer_idx, llc_id);
-
-				/* consume preempting layers first, with no delay */
-				if (preempt && scx_bpf_consume(dsq_id))
-					return;
-
-				dsq_id = llc_hi_fallback_dsq_id(llc_id);
-
-				/* make sure hi fallback dsq is empty, with no delay */
-				if (scx_bpf_consume(dsq_id))
-					return;
-
-				/* consume matching layers */
-				if (layer_matches && matching_dsq == -1) {
-					matching_dsq = dsq_id;
-					break;
-				}
-
-				/* consume !preempting open layers */
-				if ((!preempt && open) && !non_preempting_open_dsq
-				    && matching_dsq)
-					non_preempting_open_dsq = dsq_id;
-			}
-
-			/* preserve priority order of dsq execution */
-			if (matching_dsq != -1) {
-				if (scx_bpf_consume(matching_dsq))
-					return;
-			}
-			if(!non_preempting_open_dsq) {
-				if (scx_bpf_consume(non_preempting_open_dsq))
-					return;
-			}
-		}
-	}
+	/* consume preempting layers first */
+	bpf_for(idx, 0, nr_layers) {
+		if (disable_topology) {
+			if (MEMBER_VPTR(layers, [idx].preempt) && scx_bpf_consume(idx))
+				return;
+		} else {
+			layer_idx = iter_layer_dsq_ctx(idx, cctx->layer_idx);
+			bpf_for(llc_id, 0, nr_llcs) {
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+				if (MEMBER_VPTR(layers, [layer_idx].preempt) &&
+				    scx_bpf_consume(dsq_id))
+					return;
+			}
+		}
+	}
+
+	/* make sure hi fallback dsq is empty */
+	dsq_id = cpu_hi_fallback_dsq_id(cpu);
+	if (scx_bpf_consume(dsq_id))
+		return;
+
+	/* consume !open layers second */
+	bpf_for(idx, 0, nr_layers) {
+		if (disable_topology) {
+			layer_idx = idx;
+			struct layer *layer = &layers[idx];
+			struct cpumask *layer_cpumask;
+
+			/* consume matching layers */
+			if (!(layer_cpumask = lookup_layer_cpumask(idx)))
+				return;
+
+			if (bpf_cpumask_test_cpu(cpu, layer_cpumask) ||
+			    (cpu == fallback_cpu && layer->nr_cpus == 0)) {
+				if (scx_bpf_consume(idx))
+					return;
+			}
+		} else {
+			layer_idx = iter_layer_dsq_ctx(idx, cctx->layer_idx);
+			bpf_for(llc_id, 0, nr_llcs) {
+				struct layer *layer = &layers[layer_idx];
+				struct cpumask *layer_cpumask;
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+
+				/* consume matching layers */
+				if (!(layer_cpumask = lookup_layer_cpumask(layer_idx)))
+					return;
+
+				if (bpf_cpumask_test_cpu(cpu, layer_cpumask) ||
+				    (cpu <= nr_possible_cpus && cpu == fallback_cpu &&
+				     MEMBER_VPTR(layer, ->nr_cpus) == 0)) {
+					if (scx_bpf_consume(dsq_id))
+						return;
+				}
+			}
+		}
+	}
+
+	/* consume !preempting open layers */
+	bpf_for(idx, 0, nr_layers) {
+		if (disable_topology) {
+			if (!layers[idx].preempt && layers[idx].open &&
+			    scx_bpf_consume(idx))
+				return;
+		} else {
+			layer_idx = iter_layer_dsq_ctx(idx, cctx->layer_idx);
+			bpf_for(llc_id, 0, nr_llcs) {
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+				if (!MEMBER_VPTR(layers, [layer_idx].preempt) &&
+				    MEMBER_VPTR(layers, [layer_idx].open) &&
+				    scx_bpf_consume(dsq_id))
+					return;
+			}
+		}
+	}
 
 	/* consume lo fallback dsq */
 	scx_bpf_consume(LO_FALLBACK_DSQ);
 }
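
Taken as a whole, the restored (post-revert) dispatch path checks queues in a fixed priority order: preempting layers, then the CPU's hi fallback DSQ, then layers whose cpumask matches the CPU (or the fallback CPU when a layer has no CPUs), then non-preempting open layers, and finally the lo fallback DSQ. The fragment below is only a minimal userspace model of that ordering under those assumptions; try_consume() and the queue labels are illustrative stand-ins, not the scheduler's BPF code or API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in; the real scheduler consumes kernel DSQs via scx_bpf_consume(). */
static bool try_consume(const char *dsq)
{
	printf("checking %s\n", dsq);
	return false;	/* pretend every queue is empty so the walk continues */
}

/* Model of the consumption order restored by this revert. */
static void dispatch_order(int nr_layers)
{
	/* 1. preempting layers first */
	for (int i = 0; i < nr_layers; i++)
		if (try_consume("preempting layer"))
			return;

	/* 2. the CPU's hi fallback queue */
	if (try_consume("hi fallback"))
		return;

	/* 3. layers whose cpumask matches this CPU */
	for (int i = 0; i < nr_layers; i++)
		if (try_consume("matching !open layer"))
			return;

	/* 4. non-preempting open layers */
	for (int i = 0; i < nr_layers; i++)
		if (try_consume("!preempting open layer"))
			return;

	/* 5. finally the lo fallback queue */
	try_consume("lo fallback");
}

int main(void)
{
	dispatch_order(3);
	return 0;
}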