scx_layered: remove ->open from layer struct

commit 9cf137be99
parent fae5396fd6
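The boolean ->open duplicated information already carried by ->kind: a
layer is "open" exactly when its kind is not LAYER_KIND_CONFINED, so both
Open and Grouped layers count as open. Every layer->open test in the BPF
code becomes a comparison against LAYER_KIND_CONFINED, and the Rust side
no longer has to write the flag into the skeleton.

For reference, a minimal sketch of the kind constants this change relies
on. The names are inferred from this diff (LAYER_KIND_CONFINED in the new
checks; LayerKind::Open and LayerKind::Grouped on the Rust side); the
actual definition presumably lives in scx_layered's intf.h and its
ordering may differ:

	/* Sketch only: variant names taken from this diff, order assumed. */
	enum layer_kind {
		LAYER_KIND_OPEN,
		LAYER_KIND_GROUPED,
		LAYER_KIND_CONFINED,
	};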
--- a/scheds/rust/scx_layered/src/bpf/main.bpf.c
+++ b/scheds/rust/scx_layered/src/bpf/main.bpf.c
@@ -188,7 +188,6 @@ struct layer {
 	u32 weight;
 
 	int kind;
-	bool open;
 	bool preempt;
 	bool preempt_first;
 	bool exclusive;
@@ -532,7 +532,7 @@ bool should_try_preempt_first(s32 cand, struct layer *layer,
 	if (!layer->preempt || !layer->preempt_first)
 		return false;
 
-	if (!layer->open && !bpf_cpumask_test_cpu(cand, layered_cpumask))
+	if (layer->kind == LAYER_KIND_CONFINED && !bpf_cpumask_test_cpu(cand, layered_cpumask))
 		return false;
 
 	if (!(cand_cctx = lookup_cpu_ctx(cand)) || cand_cctx->current_preempt)
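Every conversion in this commit follows the same mapping: the old
!layer->open becomes layer->kind == LAYER_KIND_CONFINED and the old
layer->open becomes layer->kind != LAYER_KIND_CONFINED. A hypothetical
helper (not part of the commit) makes the preserved semantics explicit:

	/* Hypothetical helper, not in this commit: restates the removed
	 * boolean in terms of ->kind. Grouped layers, like Open ones,
	 * are treated as open by these checks. */
	static inline bool layer_is_open(const struct layer *layer)
	{
		return layer->kind != LAYER_KIND_CONFINED;
	}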
@@ -561,7 +561,7 @@ s32 pick_idle_no_topo(struct task_struct *p, s32 prev_cpu,
 
 	/* not much to do if bound to a single CPU */
 	if (p->nr_cpus_allowed == 1 && scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
-		if (!layer->open && !bpf_cpumask_test_cpu(prev_cpu, layer_cpumask))
+		if (layer->kind == LAYER_KIND_CONFINED && !bpf_cpumask_test_cpu(prev_cpu, layer_cpumask))
 			lstat_inc(LSTAT_AFFN_VIOL, layer, cctx);
 		return prev_cpu;
 	}
@@ -621,7 +621,7 @@ s32 pick_idle_cpu(struct task_struct *p, s32 prev_cpu,
 
 	/* not much to do if bound to a single CPU */
 	if (p->nr_cpus_allowed == 1 && scx_bpf_test_and_clear_cpu_idle(prev_cpu)) {
-		if (!layer->open && !bpf_cpumask_test_cpu(prev_cpu, layer_cpumask))
+		if (layer->kind == LAYER_KIND_CONFINED && !bpf_cpumask_test_cpu(prev_cpu, layer_cpumask))
 			lstat_inc(LSTAT_AFFN_VIOL, layer, cctx);
 		return prev_cpu;
 	}
@@ -709,7 +709,7 @@ s32 pick_idle_cpu(struct task_struct *p, s32 prev_cpu,
 	/*
 	 * If the layer is an open one, we can try the whole machine.
 	 */
-	if (layer->open &&
+	if (layer->kind != LAYER_KIND_CONFINED &&
 	    ((cpu = pick_idle_cpu_from(p->cpus_ptr, prev_cpu,
 				       idle_cpumask)) >= 0)) {
 		lstat_inc(LSTAT_OPEN_IDLE, layer, cctx);
@@ -1070,7 +1070,7 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 	    (p->flags & PF_KTHREAD) && p->nr_cpus_allowed < nr_possible_cpus) {
 		struct cpumask *layer_cpumask;
 
-		if (!layer->open &&
+		if (layer->kind == LAYER_KIND_CONFINED &&
 		    (layer_cpumask = lookup_layer_cpumask(tctx->layer)) &&
 		    !bpf_cpumask_test_cpu(task_cpu, layer_cpumask))
 			lstat_inc(LSTAT_AFFN_VIOL, layer, cctx);
@@ -1087,7 +1087,7 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 	 * confined layer may fail to be consumed for an indefinite amount of
 	 * time. Queue them to the fallback DSQ.
 	 */
-	if (!layer->open && !tctx->all_cpus_allowed) {
+	if (layer->kind == LAYER_KIND_CONFINED && !tctx->all_cpus_allowed) {
 		lstat_inc(LSTAT_AFFN_VIOL, layer, cctx);
 		/*
 		 * We were previously dispatching to LO_FALLBACK_DSQ for any
@@ -1183,7 +1183,7 @@ static bool keep_running(struct cpu_ctx *cctx, struct task_struct *p)
 	 * CPU. If confined, keep running if and only if the layer has
 	 * idle CPUs.
 	 */
-	if (layer->open) {
+	if (layer->kind != LAYER_KIND_CONFINED) {
 		has_idle = !bpf_cpumask_empty(idle_cpumask);
 	} else {
 		struct cpumask *layer_cpumask;
@@ -1298,7 +1298,7 @@ void layered_dispatch_no_topo(s32 cpu, struct task_struct *prev)
 		layer = MEMBER_VPTR(layers, [layer_idx]);
 		if (has_budget(costc, layer) == 0)
 			continue;
-		if (!layer->preempt && layers->open &&
+		if (!layer->preempt && layer->kind != LAYER_KIND_CONFINED &&
 		    scx_bpf_consume(layer_idx))
 			return;
 	}
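Note that the old condition here read layers->open, indexing the global
layers array rather than the local layer pointer, so the conversion to
layer->kind != LAYER_KIND_CONFINED also appears to fix a latent typo.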
@@ -1396,7 +1396,7 @@ int consume_open_no_preempt(struct cost *costc, u32 my_llc_id)
 			u32 llc_id = rotate_llc_id(my_llc_id, llc_idx);
 			dsq_id = layer_dsq_id(layer_idx, llc_id);
 
-			if (!layer->preempt && layer->open && scx_bpf_consume(dsq_id))
+			if (!layer->preempt && layer->kind != LAYER_KIND_CONFINED && scx_bpf_consume(dsq_id))
 				return 0;
 		}
 	}
@@ -2285,7 +2285,7 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(layered_init)
 		struct layer *layer = &layers[i];
 
 		dbg("CFG LAYER[%d][%s] min_exec_ns=%lu open=%d preempt=%d exclusive=%d",
-		    i, layer->name, layer->min_exec_ns, layer->open, layer->preempt,
+		    i, layer->name, layer->min_exec_ns, layer->kind != LAYER_KIND_CONFINED, layer->preempt,
 		    layer->exclusive);
 
 		if (layer->nr_match_ors > MAX_LAYER_MATCH_ORS) {
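The dbg() line keeps its open=%d field, so the log format is unchanged;
the value is now computed as layer->kind != LAYER_KIND_CONFINED rather
than read from the removed flag.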
--- a/scheds/rust/scx_layered/src/main.rs
+++ b/scheds/rust/scx_layered/src/main.rs
@@ -1356,13 +1356,6 @@ impl<'a> Scheduler<'a> {
             }
         }
 
-        match &spec.kind {
-            LayerKind::Open { .. } | LayerKind::Grouped { .. } => {
-                layer.open.write(true);
-            }
-            _ => {}
-        }
-
         perf_set |= layer.perf > 0;
     }
 
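With the flag gone from struct layer, the skeleton write above is simply
dropped; spec.kind is presumably still propagated to the BPF side through
the layer's kind field elsewhere in this function (not shown in this
hunk).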