scx_layered: Add helper for layer slice duration
Add a helper for returning the appropriate slice duration for a layer and replace the various instances where the slice value was being recalculated.

Signed-off-by: Daniel Hodges <hodges.daniel.scott@gmail.com>
This commit is contained in:
parent 835f0d0e6a
commit cad3413886
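The new helper centralizes the "per-layer slice if set, otherwise the global default" fallback that each call site previously open-coded as a ternary. Below is a minimal, self-contained C sketch of that logic; the trimmed-down struct layer, the assumed 20 ms default, and the main() harness are illustrative stand-ins rather than the scheduler's actual definitions (in the real BPF code the helper is __always_inline and slice_ns is a global scheduler parameter).

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Trimmed-down stand-in for scx_layered's per-layer configuration. */
struct layer {
	u64 slice_ns;	/* 0 means "no per-layer override" */
};

/* Assumed 20 ms global default, standing in for the scheduler's slice_ns. */
static const u64 slice_ns = 20ULL * 1000 * 1000;

/* The helper added by this commit: prefer the layer's own slice duration
 * and fall back to the global default when the layer does not set one. */
static inline u64 layer_slice_ns(struct layer *layer)
{
	return layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
}

int main(void)
{
	struct layer with_override = { .slice_ns = 5ULL * 1000 * 1000 };	/* 5 ms */
	struct layer without_override = { .slice_ns = 0 };

	printf("override: %llu ns\n", (unsigned long long)layer_slice_ns(&with_override));
	printf("default:  %llu ns\n", (unsigned long long)layer_slice_ns(&without_override));
	return 0;
}

With the helper in place, the call sites in the hunks below simply pass layer_slice_ns(layer) to scx_bpf_dispatch()/scx_bpf_dispatch_vtime() or assign it to p->scx.slice instead of re-deriving the value.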
@@ -92,6 +92,11 @@ static struct layer *lookup_layer(int idx)
 	return &layers[idx];
 }
 
+static __always_inline u64 layer_slice_ns(struct layer *layer)
+{
+	return layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
+}
+
 static __always_inline
 int rotate_layer_id(u32 base_layer_id, u32 rotation)
 {
@@ -776,8 +781,8 @@ s32 BPF_STRUCT_OPS(layered_select_cpu, struct task_struct *p, s32 prev_cpu, u64
 
 	if (cpu >= 0) {
 		lstat_inc(LSTAT_SEL_LOCAL, layer, cctx);
-		u64 layer_slice_ns = layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
-		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, layer_slice_ns, 0);
+		u64 slice_ns = layer_slice_ns(layer);
+		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, slice_ns, 0);
 		return cpu;
 	} else {
 		return prev_cpu;
@@ -1066,7 +1071,7 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 
 	try_preempt_first = cctx->try_preempt_first;
 	cctx->try_preempt_first = false;
-	u64 layer_slice_ns = layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
+	u64 slice_ns = layer_slice_ns(layer);
 
 	if (cctx->yielding) {
 		lstat_inc(LSTAT_YIELD, layer, cctx);
@@ -1086,8 +1091,8 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 	 * Limit the amount of budget that an idling task can accumulate
 	 * to one slice.
 	 */
-	if (vtime_before(vtime, layer->vtime_now - layer_slice_ns))
-		vtime = layer->vtime_now - layer_slice_ns;
+	if (vtime_before(vtime, layer->vtime_now - slice_ns))
+		vtime = layer->vtime_now - slice_ns;
 
 	/*
 	 * Special-case per-cpu kthreads which aren't in a preempting layer so
@@ -1138,13 +1143,13 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 
 	if (disable_topology) {
 		tctx->last_dsq = tctx->layer;
-		scx_bpf_dispatch_vtime(p, tctx->layer, layer_slice_ns, vtime, enq_flags);
+		scx_bpf_dispatch_vtime(p, tctx->layer, slice_ns, vtime, enq_flags);
 	} else {
 		u32 llc_id = cpu_to_llc_id(tctx->last_cpu >= 0 ? tctx->last_cpu :
					   bpf_get_smp_processor_id());
 		idx = layer_dsq_id(layer->idx, llc_id);
 		tctx->last_dsq = idx;
-		scx_bpf_dispatch_vtime(p, idx, layer_slice_ns, vtime, enq_flags);
+		scx_bpf_dispatch_vtime(p, idx, slice_ns, vtime, enq_flags);
 	}
 
 preempt:
@@ -1166,9 +1171,9 @@ static bool keep_running(struct cpu_ctx *cctx, struct task_struct *p)
 	if (!(tctx = lookup_task_ctx(p)) || !(layer = lookup_layer(tctx->layer)))
 		goto no;
 
-	u64 layer_slice_ns = layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
+	u64 slice_ns = layer_slice_ns(layer);
 	/* @p has fully consumed its slice and still wants to run */
-	cctx->ran_current_for += layer_slice_ns;
+	cctx->ran_current_for += slice_ns;
 
 	/*
 	 * There wasn't anything in the local or global DSQ, but there may be
@@ -1193,7 +1198,7 @@ static bool keep_running(struct cpu_ctx *cctx, struct task_struct *p)
 	 */
 	if (disable_topology) {
 		if (!scx_bpf_dsq_nr_queued(layer->idx)) {
-			p->scx.slice = layer_slice_ns;
+			p->scx.slice = slice_ns;
 			lstat_inc(LSTAT_KEEP, layer, cctx);
 			return true;
 		}
@@ -1202,7 +1207,7 @@ static bool keep_running(struct cpu_ctx *cctx, struct task_struct *p)
 					   tctx->last_cpu :
 					   bpf_get_smp_processor_id());
 		if (!scx_bpf_dsq_nr_queued(dsq_id)) {
-			p->scx.slice = layer_slice_ns;
+			p->scx.slice = slice_ns;
 			lstat_inc(LSTAT_KEEP, layer, cctx);
 			return true;
 		}
@@ -1229,7 +1234,7 @@ static bool keep_running(struct cpu_ctx *cctx, struct task_struct *p)
 		scx_bpf_put_idle_cpumask(idle_cpumask);
 
 		if (has_idle) {
-			p->scx.slice = layer_slice_ns;
+			p->scx.slice = slice_ns;
 			lstat_inc(LSTAT_KEEP, layer, cctx);
 			return true;
 		}
@@ -2059,11 +2064,11 @@ void BPF_STRUCT_OPS(layered_stopping, struct task_struct *p, bool runnable)
 	cctx->current_preempt = false;
 	cctx->prev_exclusive = cctx->current_exclusive;
 	cctx->current_exclusive = false;
-	u64 layer_slice_ns = layer->slice_ns > 0 ? layer->slice_ns : slice_ns;
+	u64 slice_ns = layer_slice_ns(layer);
 
 	/* scale the execution time by the inverse of the weight and charge */
-	if (cctx->yielding && used < layer_slice_ns)
-		used = layer_slice_ns;
+	if (cctx->yielding && used < slice_ns)
+		used = slice_ns;
 	p->scx.dsq_vtime += used * 100 / p->scx.weight;
 	cctx->maybe_idle = true;
 }