scx_rustland: use dynamic time slice in the user-space scheduler
Implement simple logic in the user-space scheduler to automatically adjust the tasks' time slice: reduce the time slice by a scaling factor of (nr_waiting / nr_cpus + 1), where nr_waiting is the number of tasks waiting in the scheduler and nr_cpus is the number of CPUs in the system.

Using a finer-grained time slice as the number of tasks in the system grows improves the responsiveness of low-latency activities (e.g., audio, video games), even in the presence of other CPU-intensive tasks running concurrently in the system.

On the other hand, extending the time slice when only a limited number of tasks are active improves overall system throughput and reduces the number of context switches.

Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
This commit is contained in:
parent 303c4ea548
commit bf98154ee1
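To make the scaling rule from the commit message concrete, here is a minimal standalone sketch; it is not part of the commit, and the 20 ms base slice, the function name, and the example task counts are assumptions used only for illustration.

// Standalone illustration of the scaling rule described above (not part of the
// commit); the 20 ms base slice and the function name are made-up examples.
fn scaled_slice_us(base_slice_ns: u64, nr_waiting: u64, nr_cpus: u64) -> u64 {
    // The scaling factor grows with the number of waiting tasks per CPU.
    let scaling = nr_waiting / nr_cpus + 1;
    // Convert to microseconds, never dropping below 1 ms (1000 us).
    (base_slice_ns / scaling / 1000).max(1000)
}

fn main() {
    let base_slice_ns = 20_000_000; // assumed 20 ms base time slice
    // Lightly loaded: 4 waiting tasks on 8 CPUs -> scaling = 1, full 20 ms slice.
    assert_eq!(scaled_slice_us(base_slice_ns, 4, 8), 20_000);
    // Busy: 24 waiting tasks on 8 CPUs -> scaling = 4, slice shrinks to 5 ms.
    assert_eq!(scaled_slice_us(base_slice_ns, 24, 8), 5_000);
    // Overloaded: 800 waiting tasks on 8 CPUs -> scaling = 101, clamped to 1 ms.
    assert_eq!(scaled_slice_us(base_slice_ns, 800, 8), 1_000);
}

In the scheduler itself the waiting count comes from the BPF counters (nr_queued + nr_scheduled) and the result is applied via set_effective_slice_us(), as the diff below shows.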
@@ -265,6 +265,7 @@ impl<'a> Scheduler<'a> {
     // Drain all the tasks from the queued list, update their vruntime (Self::update_enqueued()),
     // then push them all to the task pool (doing so will sort them by their vruntime).
     fn drain_queued_tasks(&mut self) {
+        let slice_ns = self.bpf.get_effective_slice_us() * 1000;
         loop {
             match self.bpf.dequeue_task() {
                 Ok(Some(task)) => {
@@ -292,7 +293,7 @@ impl<'a> Scheduler<'a> {
                         task.sum_exec_runtime,
                         task.weight,
                         self.min_vruntime,
-                        self.slice_ns,
+                        slice_ns,
                     );

                     // Insert task in the task pool (ordered by vruntime).
@@ -318,10 +319,28 @@ impl<'a> Scheduler<'a> {
         }
     }

+    // Dynamically adjust the time slice based on the amount of waiting tasks.
+    fn scale_slice_ns(&mut self) {
+        let nr_queued = *self.bpf.nr_queued_mut();
+        let nr_scheduled = *self.bpf.nr_scheduled_mut();
+        let nr_waiting = nr_queued + nr_scheduled;
+        let nr_cpus = self.nr_cpus_online as u64;
+
+        // Scale time slice, but never scale below 1 ms.
+        let scaling = nr_waiting / nr_cpus + 1;
+        let slice_us = (self.slice_ns / scaling / 1000).max(1000);
+
+        // Apply new scaling.
+        self.bpf.set_effective_slice_us(slice_us);
+    }
+
     // Dispatch tasks from the task pool in order (sending them to the BPF dispatcher).
     fn dispatch_tasks(&mut self) {
         let mut idle_cpus = self.get_idle_cpus();

+        // Adjust the dynamic time slice immediately before dispatching the tasks.
+        self.scale_slice_ns();
+
         // Dispatch only a batch of tasks equal to the amount of idle CPUs in the system.
         //
         // This allows to have more tasks sitting in the task pool, reducing the pressure on the
@@ -444,6 +463,9 @@ impl<'a> Scheduler<'a> {
             nr_waiting, nr_queued, nr_scheduled
         );

+        // Show current used time slice.
+        info!("time slice = {} us", self.bpf.get_effective_slice_us());
+
         // Show tasks that are currently running.
         let sched_cpu = match Self::get_current_cpu() {
             Ok(cpu_info) => cpu_info,