From c90144d761ecea48af113ca37d3004b5c9a05e6c Mon Sep 17 00:00:00 2001
From: Pat Somaru
Date: Tue, 8 Oct 2024 22:01:05 -0400
Subject: [PATCH] Revert "Merge pull request #746 from likewhatevs/layered-delay"

This reverts commit 2077b9a7991f630d8d42b91a91cbfabbba3e7206, reversing
changes made to eb73005d07149a4cc0a1f991f72b910f703e1724.
---
 scheds/rust/scx_layered/src/bpf/main.bpf.c | 134 ++++++++++-----------
 1 file changed, 66 insertions(+), 68 deletions(-)

diff --git a/scheds/rust/scx_layered/src/bpf/main.bpf.c b/scheds/rust/scx_layered/src/bpf/main.bpf.c
index 9708bce..080a5ca 100644
--- a/scheds/rust/scx_layered/src/bpf/main.bpf.c
+++ b/scheds/rust/scx_layered/src/bpf/main.bpf.c
@@ -1255,84 +1255,82 @@ void BPF_STRUCT_OPS(layered_dispatch, s32 cpu, struct task_struct *prev)
 	if (!disable_topology && dsq_iter_algo == DSQ_ITER_ROUND_ROBIN)
 		cpu_ctx_layer_idx_inc(cctx);
 
+	/* consume preempting layers first */
 	bpf_for(idx, 0, nr_layers) {
-		layer_idx = iter_layer_dsq_ctx(idx, cctx->layer_idx);
-		bool preempt = MEMBER_VPTR(layers, [layer_idx].preempt);
-		bool open = MEMBER_VPTR(layers, [layer_idx].open);
-		u32 layer_cpus = *MEMBER_VPTR(layers[layer_idx], .nr_cpus);
-		struct cpumask *layer_cpumask;
-		bool have_layer_cpumask = layer_cpumask = lookup_layer_cpumask(idx);
-		bool cpumask_test = false;
-		if (have_layer_cpumask)
-			cpumask_test = bpf_cpumask_test_cpu(cpu, layer_cpumask);
-		bool layer_matches = (have_layer_cpumask &&
-				      (cpumask_test ||
-				       (cpu <= nr_possible_cpus &&
-					cpu == fallback_cpu &&
-					layer_cpus == 0)));
-
 		if (disable_topology) {
-			/* consume preempting layers first */
-			if (preempt && scx_bpf_consume(idx))
+			if (MEMBER_VPTR(layers, [idx].preempt) && scx_bpf_consume(idx))
 				return;
-
-			/* make sure hi fallback dsq is empty */
-			dsq_id = cpu_hi_fallback_dsq_id(cpu);
-			if (scx_bpf_consume(dsq_id))
-				return;
-
-			/* consume matching layers */
-			if (have_layer_cpumask)
-			{
-				if (cpumask_test ||
-				    (cpu == fallback_cpu && layer_cpus == 0)) {
-					if (scx_bpf_consume(idx))
-						return;
-				}
-			}
-
-			/* consume !preempting open layers */
-			if (!layers[idx].preempt && layers[idx].open &&
-			    scx_bpf_consume(idx))
-				return;
-
 		} else {
-			u64 matching_dsq = -1;
-			u64 non_preempting_open_dsq;
-
+			layer_idx = iter_layer_dsq_ctx(idx, cctx->layer_idx);
 			bpf_for(llc_id, 0, nr_llcs) {
 				dsq_id = layer_dsq_id(layer_idx, llc_id);
-				/* consume preempting layers first, with no delay */
-				if (preempt && scx_bpf_consume(dsq_id))
-					return;
 
-				dsq_id = llc_hi_fallback_dsq_id(llc_id);
-				/* make sure hi fallback dsq is empty, with no delay */
-				if (scx_bpf_consume(dsq_id))
-					return;
-
-				/* consume matching layers */
-				if (layer_matches && matching_dsq == -1) {
-					matching_dsq = dsq_id;
-					break;
-				}
-				/* consume !preempting open layers */
-				if ((!preempt && open) && !non_preempting_open_dsq
-				    && matching_dsq)
-					non_preempting_open_dsq = dsq_id;
-			}
-
-			/* preserve priority order of dsq execution */
-			if (matching_dsq != -1) {
-				if (scx_bpf_consume(matching_dsq))
-					return;
-			}
-			if(!non_preempting_open_dsq) {
-				if (scx_bpf_consume(non_preempting_open_dsq))
+				if (MEMBER_VPTR(layers, [layer_idx].preempt) &&
+				    scx_bpf_consume(dsq_id))
 					return;
 			}
 		}
 	}
-	/* consume lo fallback dsq */
+
+	dsq_id = cpu_hi_fallback_dsq_id(cpu);
+	if (scx_bpf_consume(dsq_id))
+		return;
+
+	/* consume !open layers second */
+	bpf_for(idx, 0, nr_layers) {
+		if (disable_topology) {
+			layer_idx = idx;
+			struct layer *layer = &layers[idx];
+			struct cpumask *layer_cpumask;
+
+			/* consume matching layers */
+			if (!(layer_cpumask = lookup_layer_cpumask(idx)))
+				return;
+
+			if (bpf_cpumask_test_cpu(cpu, layer_cpumask) ||
+			    (cpu == fallback_cpu && layer->nr_cpus == 0)) {
+				if (scx_bpf_consume(idx))
+					return;
+			}
+		} else {
+			layer_idx = iter_layer_dsq_ctx(idx, cctx->layer_idx);
+			bpf_for(llc_id, 0, nr_llcs) {
+				struct layer *layer = &layers[layer_idx];
+				struct cpumask *layer_cpumask;
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+
+				/* consume matching layers */
+				if (!(layer_cpumask = lookup_layer_cpumask(layer_idx)))
+					return;
+
+				if (bpf_cpumask_test_cpu(cpu, layer_cpumask) ||
+				    (cpu <= nr_possible_cpus && cpu == fallback_cpu &&
+				     MEMBER_VPTR(layer, ->nr_cpus) == 0)) {
+					if (scx_bpf_consume(dsq_id))
+						return;
+				}
+			}
+		}
+	}
+
+	/* consume !preempting open layers */
+	bpf_for(idx, 0, nr_layers) {
+		if (disable_topology) {
+			if (!layers[idx].preempt && layers[idx].open &&
+			    scx_bpf_consume(idx))
+				return;
+		} else {
+			layer_idx = iter_layer_dsq_ctx(idx, cctx->layer_idx);
+			bpf_for(llc_id, 0, nr_llcs) {
+				dsq_id = layer_dsq_id(layer_idx, llc_id);
+
+				if (!MEMBER_VPTR(layers, [layer_idx].preempt) &&
+				    MEMBER_VPTR(layers, [layer_idx].open) &&
+				    scx_bpf_consume(dsq_id))
+					return;
+			}
+		}
+	}
+
 	scx_bpf_consume(LO_FALLBACK_DSQ);
 }