Merge branch 'main' of https://github.com/sched-ext/scx into core_enums

Emil Tsalapatis 2024-11-06 12:44:23 -08:00
commit 42880404e1
22 changed files with 597 additions and 432 deletions

Cargo.lock (generated)

@@ -1685,7 +1685,7 @@ dependencies = [
[[package]]
name = "scx_bpfland"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"anyhow",
"clap",
@@ -1702,7 +1702,7 @@ dependencies = [
[[package]]
name = "scx_lavd"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"anyhow",
"bitvec",
@@ -1728,7 +1728,7 @@ dependencies = [
[[package]]
name = "scx_layered"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"anyhow",
"bitvec",
@@ -1752,7 +1752,7 @@ dependencies = [
[[package]]
name = "scx_loader"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"anyhow",
"clap",
@@ -1792,7 +1792,7 @@ dependencies = [
[[package]]
name = "scx_rlfifo"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"anyhow",
"ctrlc",
@@ -1805,7 +1805,7 @@ dependencies = [
[[package]]
name = "scx_rustland"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"anyhow",
"clap",
@@ -1826,7 +1826,7 @@ dependencies = [
[[package]]
name = "scx_rustland_core"
-version = "2.2.2"
+version = "2.2.3"
dependencies = [
"anyhow",
"libbpf-rs",
@@ -1839,7 +1839,7 @@ dependencies = [
[[package]]
name = "scx_rusty"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"anyhow",
"chrono",
@@ -1862,7 +1862,7 @@ dependencies = [
[[package]]
name = "scx_stats"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"anyhow",
"crossbeam",
@@ -1879,7 +1879,7 @@ dependencies = [
[[package]]
name = "scx_stats_derive"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"proc-macro2",
"quote",
@@ -1890,7 +1890,7 @@ dependencies = [
[[package]]
name = "scx_utils"
-version = "1.0.5"
+version = "1.0.6"
dependencies = [
"anyhow",
"bindgen",

meson.build

@@ -1,5 +1,5 @@
project('sched_ext schedulers', 'c',
-        version: '1.0.5',
+        version: '1.0.6',
        license: 'GPL-2.0',
        meson_version : '>= 1.2.0',)

scx_loader/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_loader"
-version = "1.0.5"
+version = "1.0.6"
authors = ["Vladislav Nepogodin <vnepogodin@cachyos.org>"]
edition = "2021"
description = "DBUS on-demand loader of sched-ext schedulers"

scx_rustland_core/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_rustland_core"
-version = "2.2.2"
+version = "2.2.3"
edition = "2021"
authors = ["Andrea Righi <andrea.righi@linux.dev>"]
license = "GPL-2.0-only"
@@ -12,12 +12,12 @@ anyhow = "1.0.65"
plain = "0.2.3"
libbpf-rs = "0.24.1"
libc = "0.2.137"
-scx_utils = { path = "../scx_utils", version = "1.0.5" }
+scx_utils = { path = "../scx_utils", version = "1.0.6" }

[build-dependencies]
tar = "0.4"
walkdir = "2.4"
-scx_utils = { path = "../scx_utils", version = "1.0.5" }
+scx_utils = { path = "../scx_utils", version = "1.0.6" }

[lib]
name = "scx_rustland_core"

scx_stats/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_stats"
-version = "1.0.5"
+version = "1.0.6"
edition = "2021"
authors = ["Tejun Heo <tj@kernel.org>"]
license = "GPL-2.0-only"

scx_stats_derive/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_stats_derive"
-version = "1.0.5"
+version = "1.0.6"
edition = "2021"
authors = ["Tejun Heo <tj@kernel.org>"]
license = "GPL-2.0-only"
@@ -13,6 +13,6 @@ proc-macro = true
[dependencies]
proc-macro2 = "1.0"
quote = "1.0"
-scx_stats = { path = "..", version = "1.0.5" }
+scx_stats = { path = "..", version = "1.0.6" }
serde_json = "1.0"
syn = { version = "2.0", features = ["extra-traits", "full"] }

scx_utils/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_utils"
-version = "1.0.5"
+version = "1.0.6"
edition = "2021"
authors = ["Tejun Heo <tj@kernel.org>"]
license = "GPL-2.0-only"
@@ -22,7 +22,7 @@ log = "0.4.17"
nvml-wrapper = { version = "0.10.0", optional = true }
paste = "1.0"
regex = "1.10"
-scx_stats = { path = "../scx_stats", version = "1.0.5" }
+scx_stats = { path = "../scx_stats", version = "1.0.6" }
serde = { version = "1.0", features = ["derive"] }
sscanf = "0.4"
tar = "0.4"

scx_bpfland/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_bpfland"
-version = "1.0.5"
+version = "1.0.6"
authors = ["Andrea Righi <andrea.righi@linux.dev>"]
edition = "2021"
description = "A vruntime-based sched_ext scheduler that prioritizes interactive workloads. https://github.com/sched-ext/scx/tree/main"
@@ -13,14 +13,14 @@ clap = { version = "4.1", features = ["derive", "env", "unicode", "wrap_help"] }
crossbeam = "0.8.4"
libbpf-rs = "0.24.1"
log = "0.4.17"
-scx_stats = { path = "../../../rust/scx_stats", version = "1.0.5" }
-scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.5" }
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_stats = { path = "../../../rust/scx_stats", version = "1.0.6" }
+scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.6" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }
serde = { version = "1.0", features = ["derive"] }
simplelog = "0.12"

[build-dependencies]
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }

[features]
enable_backtrace = []

scx_bpfland/src/bpf/intf.h

@@ -13,6 +13,7 @@
#define MAX(x, y) ((x) > (y) ? (x) : (y))
#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define CLAMP(val, lo, hi) MIN(MAX(val, lo), hi)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

enum consts {
	NSEC_PER_USEC = 1000ULL,

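For context, ARRAY_SIZE is the usual compile-time element-count idiom for fixed-size C arrays. A minimal usage sketch follows; the table, its values, and the function are hypothetical, assuming the u64 typedef is in scope as elsewhere in this header:

/* Hypothetical lookup table; ARRAY_SIZE avoids hard-coding its length. */
static const u64 boost_table[] = { 1, 2, 4, 8 };

static u64 boost_sum(void)
{
	u64 sum = 0;
	int i;

	/* sizeof(boost_table) / sizeof(boost_table[0]) == 4 elements */
	for (i = 0; i < ARRAY_SIZE(boost_table); i++)
		sum += boost_table[i];
	return sum;
}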
scx_bpfland/src/bpf/main.bpf.c (diff suppressed because it is too large)

scx_bpfland/src/main.rs

@@ -140,15 +140,6 @@ struct Opts {
    #[clap(short = 'l', long, allow_hyphen_values = true, default_value = "20000")]
    slice_us_lag: i64,

-    /// With lowlatency enabled, instead of classifying tasks as interactive or non-interactive,
-    /// they all get a dynamic priority, which is adjusted as a function of their average rate of
-    /// voluntary context switches.
-    ///
-    /// This option guarantees less spiky behavior and can be particularly useful in soft
-    /// real-time scenarios, such as audio processing, multimedia, etc.
-    #[clap(short = 'L', long, action = clap::ArgAction::SetTrue)]
-    lowlatency: bool,
-
    /// Enable kthreads prioritization.
    ///
    /// Enabling this can improve system performance, but it may also introduce interactivity
@@ -183,11 +174,6 @@ struct Opts {
    #[clap(short = 'c', long, default_value = "10")]
    nvcsw_max_thresh: u64,

-    /// Prevent starvation by making sure that at least one lower priority task is scheduled every
-    /// starvation_thresh_us (0 = disable starvation prevention).
-    #[clap(short = 't', long, default_value = "1000")]
-    starvation_thresh_us: u64,
-
    /// Enable stats monitoring with the specified interval.
    #[clap(long)]
    stats: Option<f64>,
@@ -261,12 +247,10 @@ impl<'a> Scheduler<'a> {
        // Override default BPF scheduling parameters.
        skel.maps.rodata_data.debug = opts.debug;
        skel.maps.rodata_data.smt_enabled = smt_enabled;
-        skel.maps.rodata_data.lowlatency = opts.lowlatency;
        skel.maps.rodata_data.local_kthreads = opts.local_kthreads;
        skel.maps.rodata_data.slice_max = opts.slice_us * 1000;
        skel.maps.rodata_data.slice_min = opts.slice_us_min * 1000;
        skel.maps.rodata_data.slice_lag = opts.slice_us_lag * 1000;
-        skel.maps.rodata_data.starvation_thresh_ns = opts.starvation_thresh_us * 1000;
        skel.maps.rodata_data.nvcsw_max_thresh = opts.nvcsw_max_thresh;

        // Load the BPF program for validation.
@@ -558,11 +542,8 @@ impl<'a> Scheduler<'a> {
            nr_running: self.skel.maps.bss_data.nr_running,
            nr_cpus: self.skel.maps.bss_data.nr_online_cpus,
            nr_interactive: self.skel.maps.bss_data.nr_interactive,
-            nr_prio_waiting: self.skel.maps.bss_data.nr_prio_waiting,
-            nr_shared_waiting: self.skel.maps.bss_data.nr_shared_waiting,
            nr_kthread_dispatches: self.skel.maps.bss_data.nr_kthread_dispatches,
            nr_direct_dispatches: self.skel.maps.bss_data.nr_direct_dispatches,
-            nr_prio_dispatches: self.skel.maps.bss_data.nr_prio_dispatches,
            nr_shared_dispatches: self.skel.maps.bss_data.nr_shared_dispatches,
        }
    }

scx_bpfland/src/stats.rs

@@ -21,16 +21,10 @@ pub struct Metrics {
    pub nr_cpus: u64,
    #[stat(desc = "Number of running interactive tasks")]
    pub nr_interactive: u64,
-    #[stat(desc = "Average amount of regular tasks waiting to be dispatched")]
-    pub nr_shared_waiting: u64,
-    #[stat(desc = "Average amount of interactive tasks waiting to be dispatched")]
-    pub nr_prio_waiting: u64,
    #[stat(desc = "Number of kthread direct dispatches")]
    pub nr_kthread_dispatches: u64,
    #[stat(desc = "Number of task direct dispatches")]
    pub nr_direct_dispatches: u64,
-    #[stat(desc = "Number of interactive task dispatches")]
-    pub nr_prio_dispatches: u64,
    #[stat(desc = "Number of regular task dispatches")]
    pub nr_shared_dispatches: u64,
}
@@ -39,16 +33,13 @@ impl Metrics {
    fn format<W: Write>(&self, w: &mut W) -> Result<()> {
        writeln!(
            w,
-            "[{}] tasks -> r: {:>2}/{:<2} i: {:<2} pw: {:<4} w: {:<4} | dispatch -> k: {:<5} d: {:<5} p: {:<5} s: {:<5}",
+            "[{}] tasks -> r: {:>2}/{:<2} i: {:<2} | dispatch -> k: {:<5} d: {:<5} s: {:<5}",
            crate::SCHEDULER_NAME,
            self.nr_running,
            self.nr_cpus,
            self.nr_interactive,
-            self.nr_prio_waiting,
-            self.nr_shared_waiting,
            self.nr_kthread_dispatches,
            self.nr_direct_dispatches,
-            self.nr_prio_dispatches,
            self.nr_shared_dispatches
        )?;
        Ok(())
@@ -58,7 +49,6 @@ impl Metrics {
        Self {
            nr_kthread_dispatches: self.nr_kthread_dispatches - rhs.nr_kthread_dispatches,
            nr_direct_dispatches: self.nr_direct_dispatches - rhs.nr_direct_dispatches,
-            nr_prio_dispatches: self.nr_prio_dispatches - rhs.nr_prio_dispatches,
            nr_shared_dispatches: self.nr_shared_dispatches - rhs.nr_shared_dispatches,
            ..self.clone()
        }

scx_lavd/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_lavd"
-version = "1.0.5"
+version = "1.0.6"
authors = ["Changwoo Min <changwoo@igalia.com>", "Igalia"]
edition = "2021"
description = "A Latency-criticality Aware Virtual Deadline (LAVD) scheduler based on sched_ext, which is a Linux kernel feature which enables implementing kernel thread schedulers in BPF and dynamically loading them. https://github.com/sched-ext/scx/tree/main"
@@ -19,9 +19,9 @@ libbpf-rs = "0.24.1"
libc = "0.2.137"
log = "0.4.17"
ordered-float = "3.4.0"
-scx_stats = { path = "../../../rust/scx_stats", version = "1.0.5" }
-scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.5" }
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_stats = { path = "../../../rust/scx_stats", version = "1.0.6" }
+scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.6" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }
serde = { version = "1.0", features = ["derive"] }
simplelog = "0.12"
static_assertions = "1.1.0"
@@ -29,7 +29,7 @@ plain = "0.2.3"
gpoint = "0.2"

[build-dependencies]
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }

[features]
enable_backtrace = []

scx_lavd/src/bpf/intf.h

@@ -27,6 +27,7 @@ enum consts_internal {
	LAVD_LC_RUNTIME_MAX = LAVD_TIME_ONE_SEC,
	LAVD_LC_WEIGHT_BOOST = 128, /* 2^7 */
	LAVD_LC_GREEDY_PENALTY = 20, /* 20% */
+	LAVD_LC_FREQ_OVER_RUNTIME = 100, /* 100x */
	LAVD_SLICE_BOOST_MAX_FT = 3, /* maximum additional 3x of slice */
	LAVD_SLICE_BOOST_MAX_STEP = 6, /* 6 slice exhaustions in a row */

scx_lavd/src/bpf/main.bpf.c

@@ -259,7 +259,7 @@ static u64 calc_runtime_factor(u64 runtime, u64 weight_ft)
static u64 calc_freq_factor(u64 freq, u64 weight_ft)
{
	u64 ft = sigmoid_u64(freq, LAVD_LC_FREQ_MAX);
-	return (ft * weight_ft) + 1;
+	return (ft * weight_ft * LAVD_LC_FREQ_OVER_RUNTIME) + 1;
}
static u64 calc_weight_factor(struct task_struct *p, struct task_ctx *taskc,
@@ -367,7 +367,7 @@ static void calc_virtual_deadline_delta(struct task_struct *p,
	greedy_ratio = calc_greedy_ratio(taskc);
	greedy_ft = calc_greedy_factor(greedy_ratio);
-	deadline = (taskc->run_time_ns / lat_cri) * greedy_ft;
+	deadline = (LAVD_SLICE_MAX_NS / lat_cri) * greedy_ft;
	taskc->vdeadline_delta_ns = deadline;
}

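For intuition, a hedged numeric sketch of the new deadline formula above; the constant value and the task numbers are illustrative, not taken from the source:

/*
 * Illustration only: assume LAVD_SLICE_MAX_NS = 5000000 (5 ms).
 *
 *   task A: lat_cri = 50, greedy_ft = 1
 *           deadline = (5000000 / 50) * 1 =  100000 ns (earlier deadline)
 *   task B: lat_cri = 5,  greedy_ft = 1
 *           deadline = (5000000 / 5)  * 1 = 1000000 ns (later deadline)
 *
 * Under the old formula the numerator was the task's own run_time_ns, so
 * the delta varied with each task's measured runtime; now it depends only
 * on latency criticality and the greedy factor.
 */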
scx_lavd/src/bpf/preempt.bpf.c

@@ -215,9 +215,6 @@ null_out:
static bool try_kick_cpu(struct cpu_ctx *victim_cpuc, u64 victim_last_kick_clk)
{
	/*
-	 * Kick a victim CPU if it is not victimized yet by another
-	 * concurrent kick task.
-	 *
	 * Kicking the victim CPU does _not_ guarantee that task @p will run on
	 * that CPU. Enqueuing @p to the global queue is one operation, and
	 * kicking the victim is another asynchronous operation. However, it is
@@ -226,9 +223,32 @@ static bool try_kick_cpu(struct cpu_ctx *victim_cpuc, u64 victim_last_kick_clk)
	 */
	bool ret;

+	/*
+	 * If the current CPU is a victim, we just reset the current task's
+	 * time slice as an optimization. Otherwise, kick the remote CPU for
+	 * preemption.
+	 *
+	 * Resetting a task's time slice to zero does not trigger an immediate
+	 * preemption. However, the cost of a self-IPI is prohibitively
+	 * expensive for some scenarios. The actual preemption will happen at
+	 * the next ops.tick().
+	 */
+	if (bpf_get_smp_processor_id() == victim_cpuc->cpu_id) {
+		struct task_struct *tsk = bpf_get_current_task_btf();
+		tsk->scx.slice = 0;
+		return true;
+	}
+
+	/*
+	 * Kick a victim CPU if it is not victimized yet by another
+	 * concurrent kick task.
+	 */
	ret = __sync_bool_compare_and_swap(&victim_cpuc->last_kick_clk,
					   victim_last_kick_clk,
					   bpf_ktime_get_ns());
+	/*
+	 * Kick the remote CPU for preemption.
+	 */
	if (ret)
		scx_bpf_kick_cpu(victim_cpuc->cpu_id, SCX_KICK_PREEMPT);

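As a design note, the compare-and-swap on last_kick_clk lets concurrent kickers race benignly: only the caller that still sees the timestamp it sampled wins, and the losers skip the kick. A standalone sketch of the pattern, using illustrative names rather than the scheduler's code:

#include <stdbool.h>
#include <stdint.h>

static uint64_t last_kick_clk;

/*
 * Of several concurrent callers that sampled the same old timestamp,
 * exactly one CAS succeeds; the rest observe failure and skip the kick.
 */
static bool try_claim_kick(uint64_t seen_clk, uint64_t now)
{
	return __sync_bool_compare_and_swap(&last_kick_clk, seen_clk, now);
}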
scx_layered/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_layered"
-version = "1.0.5"
+version = "1.0.6"
authors = ["Tejun Heo <htejun@meta.com>", "Meta"]
edition = "2021"
description = "A highly configurable multi-layer BPF / user space hybrid scheduler used within sched_ext, which is a Linux kernel feature which enables implementing kernel thread schedulers in BPF and dynamically loading them. https://github.com/sched-ext/scx/tree/main"
@@ -19,15 +19,15 @@ lazy_static = "1.4"
libbpf-rs = "0.24.1"
libc = "0.2.137"
log = "0.4.17"
-scx_stats = { path = "../../../rust/scx_stats", version = "1.0.5" }
-scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.5" }
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_stats = { path = "../../../rust/scx_stats", version = "1.0.6" }
+scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.6" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
simplelog = "0.12"

[build-dependencies]
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }

[features]
enable_backtrace = []

scx_mitosis/Cargo.toml

@@ -19,13 +19,13 @@ libbpf-rs = "0.24.1"
libc = "0.2.137"
log = "0.4.17"
maplit = "1.0.2"
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
simplelog = "0.12"

[build-dependencies]
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }

[features]
enable_backtrace = []

scx_mitosis/src/bpf/mitosis.bpf.c

@@ -236,6 +236,91 @@ static inline const struct cpumask *lookup_cell_cpumask(int idx)
	return (const struct cpumask *)cpumaskw->cpumask;
}

+/*
+ * This is an RCU-like implementation to keep track of scheduling events so we
+ * can establish when cell assignments have propagated completely.
+ */
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__type(key, u32);
+	__type(value, u32);
+	__uint(max_entries, 1);
+} percpu_critical_sections SEC(".maps");
+
+/* Same implementation for enter/exit */
+static __always_inline int critical_section()
+{
+	u32 zero = 0;
+	u32 *data;
+
+	if (!(data = bpf_map_lookup_elem(&percpu_critical_sections, &zero))) {
+		scx_bpf_error("no percpu_critical_sections");
+		return -1;
+	}
+
+	/*
+	 * Bump the counter; the LSB indicates we are in a critical section
+	 * and the rest of the bits keep track of how many critical sections.
+	 */
+	WRITE_ONCE(*data, *data + 1);
+	return 0;
+}
+
+#define critical_section_enter() critical_section()
+#define critical_section_exit() critical_section()
+
+u32 critical_section_state[MAX_CPUS];
+
+/*
+ * Write side will record the current state and then poll to check that the
+ * generation has advanced (somewhat like call_rcu)
+ */
+static __always_inline int critical_section_record()
+{
+	u32 zero = 0;
+	u32 *data;
+	int nr_cpus = nr_possible_cpus;
+
+	if (nr_cpus > MAX_CPUS)
+		nr_cpus = MAX_CPUS;
+
+	for (int i = 0; i < nr_cpus; ++i) {
+		if (!(data = bpf_map_lookup_percpu_elem(
+			    &percpu_critical_sections, &zero, i))) {
+			scx_bpf_error("no percpu_critical_sections");
+			return -1;
+		}
+		critical_section_state[i] = READ_ONCE(*data);
+	}
+	return 0;
+}
+
+static __always_inline int critical_section_poll()
+{
+	u32 zero = 0;
+	u32 *data;
+	int nr_cpus = nr_possible_cpus;
+
+	if (nr_cpus > MAX_CPUS)
+		nr_cpus = MAX_CPUS;
+
+	for (int i = 0; i < nr_cpus; ++i) {
+		/* If not in a critical section at the time of record, then it passes */
+		if (!(critical_section_state[i] & 1))
+			continue;
+		if (!(data = bpf_map_lookup_percpu_elem(
+			    &percpu_critical_sections, &zero, i))) {
+			scx_bpf_error("no percpu_critical_sections");
+			return -1;
+		}
+		if (READ_ONCE(*data) == critical_section_state[i])
+			return 1;
+	}
+	return 0;
+}
+
/*
 * Along with a user_global_seq bump, indicates that cgroup->cell assignment
 * changed
@@ -265,6 +350,16 @@ int BPF_PROG(sched_tick_fentry)
	 * scheduler tick. This is a crude way of mimicking RCU synchronization.
	 */
	if (READ_ONCE(draining)) {
+		if (critical_section_poll())
+			return 0;
+
+		/* FIXME: If a cell is being destroyed, we need to make sure
+		 * that the dsq is drained before removing it from all the cpus.
+		 *
+		 * Additionally, the handling of pinned tasks is broken here - we
+		 * send them to a cell DSQ if there's overlap of the cell's CPUs
+		 * and the task's cpumask, but if the cell's CPUs change we might
+		 * stall the task indefinitely.
+		 */
		bpf_for(cpu_idx, 0, nr_possible_cpus)
		{
			if (!(cpu_ctx = lookup_cpu_ctx(cpu_idx)))
@@ -423,6 +518,11 @@ int BPF_PROG(sched_tick_fentry)
	/* Bump the global seq last to ensure that prior stores are now visible. This synchronizes with the read of global_seq */
	barrier();
	WRITE_ONCE(global_seq, global_seq + 1);
+	/*
+	 * On subsequent ticks we'll check that all in-flight enqueues are done so
+	 * we can clear the prev_cell for each cpu. Record the state here.
+	 */
+	critical_section_record();
	return 0;
}
@@ -612,8 +712,17 @@ s32 BPF_STRUCT_OPS(mitosis_select_cpu, struct task_struct *p, s32 prev_cpu,
	if (!(cctx = lookup_cpu_ctx(-1)) || !(tctx = lookup_task_ctx(p)))
		return prev_cpu;

-	if (maybe_refresh_cell(p, tctx) < 0)
-		return prev_cpu;
+	/*
+	 * This is a lightweight (RCU-like) critical section covering from when we
+	 * refresh cell information to when we enqueue onto the task's assigned
+	 * cell's DSQ. This allows us to publish new cell assignments and establish
+	 * a point at which all future enqueues will be on the new assignments.
+	 */
+	critical_section_enter();
+	if (maybe_refresh_cell(p, tctx) < 0) {
+		cpu = prev_cpu;
+		goto out;
+	}

	if ((cpu = pick_idle_cpu(p, prev_cpu, cctx, tctx)) >= 0) {
		cstat_inc(CSTAT_LOCAL, tctx->cell, cctx);
@@ -623,10 +732,12 @@
		scx_bpf_error(
			"select_cpu returned cpu %d belonging to cell %d but task belongs to cell %d",
			cpu, cctx->cell, tctx->cell);
-		return cpu;
+		goto out;
	}

-	return prev_cpu;
+	cpu = prev_cpu;
+out:
+	critical_section_exit();
+	return cpu;
}
static __always_inline bool pick_idle_cpu_and_kick(struct task_struct *p,
@@ -662,11 +773,18 @@ void BPF_STRUCT_OPS(mitosis_enqueue, struct task_struct *p, u64 enq_flags)
	if (!(cctx = lookup_cpu_ctx(-1)) || !(tctx = lookup_task_ctx(p)))
		return;

+	/*
+	 * This is a lightweight (RCU-like) critical section covering from when we
+	 * refresh cell information to when we enqueue onto the task's assigned
+	 * cell's DSQ. This allows us to publish new cell assignments and establish
+	 * a point at which all future enqueues will be on the new assignments.
+	 */
+	critical_section_enter();
	if (maybe_refresh_cell(p, tctx) < 0)
-		return;
+		goto out;

	if (!(cell = lookup_cell(tctx->cell)))
-		return;
+		goto out;

	/*
	 * Limit the amount of budget that an idling task can accumulate
@@ -690,6 +808,8 @@ void BPF_STRUCT_OPS(mitosis_enqueue, struct task_struct *p, u64 enq_flags)
	 */
	if (!(enq_flags & SCX_ENQ_WAKEUP))
		pick_idle_cpu_and_kick(p, task_cpu, cctx, tctx);
+out:
+	critical_section_exit();
}

void BPF_STRUCT_OPS(mitosis_dispatch, s32 cpu, struct task_struct *prev)

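To summarize how the RCU-like pieces above compose, a simplified sketch; the critical_section_* names match the diff, while the wrapper functions and the scheduler plumbing around them are hypothetical:

/* Reader side: wraps the cell lookup / enqueue hot path. */
static void reader_hot_path(void)
{
	critical_section_enter();	/* per-CPU counter becomes odd */
	/* ... maybe_refresh_cell() and enqueue to the cell's DSQ ... */
	critical_section_exit();	/* counter becomes even again */
}

/* Writer side: runs from the tick after publishing new cell assignments. */
static void writer_publish(void)
{
	critical_section_record();	/* snapshot every CPU's counter */
}

static bool writer_grace_period_over(void)
{
	/*
	 * A CPU delays completion only if it was mid-section at record time
	 * (LSB set) and its counter has not advanced since.
	 */
	return critical_section_poll() == 0;
}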
scx_rlfifo/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_rlfifo"
-version = "1.0.5"
+version = "1.0.6"
authors = ["Andrea Righi <andrea.righi@linux.dev>"]
edition = "2021"
description = "A simple FIFO scheduler in Rust that runs in user-space"
@@ -12,12 +12,12 @@ plain = "0.2.3"
ctrlc = { version = "3.1", features = ["termination"] }
libbpf-rs = "0.24.1"
libc = "0.2.137"
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
-scx_rustland_core = { path = "../../../rust/scx_rustland_core", version = "2.2.2" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }
+scx_rustland_core = { path = "../../../rust/scx_rustland_core", version = "2.2.3" }

[build-dependencies]
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
-scx_rustland_core = { path = "../../../rust/scx_rustland_core", version = "2.2.2" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }
+scx_rustland_core = { path = "../../../rust/scx_rustland_core", version = "2.2.3" }

[features]
enable_backtrace = []

scx_rustland/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_rustland"
-version = "1.0.5"
+version = "1.0.6"
authors = ["Andrea Righi <andrea.righi@linux.dev>"]
edition = "2021"
description = "A BPF component (dispatcher) that implements the low level sched-ext functionalities and a user-space counterpart (scheduler), written in Rust, that implements the actual scheduling policy. This is used within sched_ext, which is a Linux kernel feature which enables implementing kernel thread schedulers in BPF and dynamically loading them. https://github.com/sched-ext/scx/tree/main"
@@ -17,15 +17,15 @@ libc = "0.2.137"
log = "0.4.17"
ordered-float = "3.4.0"
serde = { version = "1.0", features = ["derive"] }
-scx_stats = { path = "../../../rust/scx_stats", version = "1.0.5" }
-scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.5" }
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
-scx_rustland_core = { path = "../../../rust/scx_rustland_core", version = "2.2.2" }
+scx_stats = { path = "../../../rust/scx_stats", version = "1.0.6" }
+scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.6" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }
+scx_rustland_core = { path = "../../../rust/scx_rustland_core", version = "2.2.3" }
simplelog = "0.12"

[build-dependencies]
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
-scx_rustland_core = { path = "../../../rust/scx_rustland_core", version = "2.2.2" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }
+scx_rustland_core = { path = "../../../rust/scx_rustland_core", version = "2.2.3" }

[features]
enable_backtrace = []

scx_rusty/Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "scx_rusty"
-version = "1.0.5"
+version = "1.0.6"
authors = ["Dan Schatzberg <dschatzberg@meta.com>", "Meta"]
edition = "2021"
description = "A multi-domain, BPF / user space hybrid scheduler used within sched_ext, which is a Linux kernel feature which enables implementing kernel thread schedulers in BPF and dynamically loading them. https://github.com/sched-ext/scx/tree/main"
@@ -17,16 +17,16 @@ libbpf-rs = "0.24.1"
libc = "0.2.137"
log = "0.4.17"
ordered-float = "3.4.0"
-scx_stats = { path = "../../../rust/scx_stats", version = "1.0.5" }
-scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.5" }
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_stats = { path = "../../../rust/scx_stats", version = "1.0.6" }
+scx_stats_derive = { path = "../../../rust/scx_stats/scx_stats_derive", version = "1.0.6" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }
serde = { version = "1.0", features = ["derive"] }
simplelog = "0.12"
sorted-vec = "0.8.3"
static_assertions = "1.1.0"

[build-dependencies]
-scx_utils = { path = "../../../rust/scx_utils", version = "1.0.5" }
+scx_utils = { path = "../../../rust/scx_utils", version = "1.0.6" }

[features]
enable_backtrace = []