Merge pull request #337 from sched-ext/htejun/fix-layered-load

Bring Rust scheduler's compat support to parity with C
David Vernet, 2024-06-06 19:57:52 -05:00, committed by GitHub
commit 5ad8d40713
4 changed files with 41 additions and 13 deletions

View File

@@ -118,7 +118,7 @@ if should_build_libbpf
   endforeach
   message('Fetching libbpf repo')
-  libbpf_commit = '6d3595d215b014d3eddb88038d686e1c20781534'
+  libbpf_commit = '42065ea6627ff6e1ab4c65e51042a70fbf30ff7c'
   run_command(fetch_libbpf, meson.current_build_dir(), libbpf_commit, check: true)
   make_jobs = 1
@@ -174,7 +174,7 @@ endif
 if should_build_bpftool
   message('Fetching bpftool repo')
-  bpftool_commit = '20ce6933869b70bacfdd0dd1a8399199290bf8ff'
+  bpftool_commit = '42065ea6627ff6e1ab4c65e51042a70fbf30ff7c'
   run_command(fetch_bpftool, meson.current_build_dir(), bpftool_commit, check: true)
   bpftool_target = custom_target('bpftool_target',

View File

@@ -127,11 +127,11 @@ pub fn struct_has_field(type_name: &str, field: &str) -> Result<bool> {
     return Ok(false);
 }
 
-pub fn kfunc_exists(kfunc: &str) -> Result<bool> {
+pub fn ksym_exists(ksym: &str) -> Result<bool> {
     let btf: &btf = *VMLINUX_BTF;
-    let kfunc_name = CString::new(kfunc).unwrap();
-    let tid = unsafe { btf__find_by_name_kind(btf, kfunc_name.as_ptr(), BTF_KIND_FUNC) };
+    let ksym_name = CString::new(ksym).unwrap();
+    let tid = unsafe { btf__find_by_name(btf, ksym_name.as_ptr()) };
     Ok(tid >= 0)
 }
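The switch from btf__find_by_name_kind(..., BTF_KIND_FUNC) to btf__find_by_name() also widens the lookup to any named entry in vmlinux BTF, and the new ksym_exists() name reflects that the helper now probes kernel symbols in general rather than only kfuncs. A minimal usage sketch, assuming nothing beyond the scx_utils crate in this tree; the probed symbol names mirror the ones used elsewhere in this commit:

    // Sketch: gate optional behavior on symbols present in the running kernel.
    use anyhow::Result;
    use scx_utils::compat;

    fn main() -> Result<()> {
        // scx_bpf_cpuperf_set() is the kfunc behind cpufreq control.
        if !compat::ksym_exists("scx_bpf_cpuperf_set")? {
            println!("scx_bpf_cpuperf_set() not in vmlinux BTF; skipping perf control");
        }

        // scheduler_tick() was renamed to sched_tick() during the v6.10 cycle.
        let tick_fn = if compat::ksym_exists("sched_tick")? {
            "sched_tick"
        } else {
            "scheduler_tick"
        };
        println!("tick fentry probe would target {}()", tick_fn);
        Ok(())
    }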
@@ -204,12 +204,29 @@ macro_rules! scx_ops_load {
         scx_utils::uei_set_size!($skel, $ops, $uei);
         let ops = $skel.struct_ops.[<$ops _mut>]();
-        let has_field = scx_utils::compat::struct_has_field("sched_ext_ops", "exit_dump_len")?;
-        if !has_field && ops.exit_dump_len != 0 {
+        if !scx_utils::compat::struct_has_field("sched_ext_ops", "exit_dump_len")?
+            && ops.exit_dump_len != 0 {
             scx_utils::warn!("Kernel doesn't support setting exit dump len");
             ops.exit_dump_len = 0;
         }
+        if !scx_utils::compat::struct_has_field("sched_ext_ops", "tick")?
+            && ops.tick != std::ptr::null_mut() {
+            scx_utils::warn!("Kernel doesn't support ops.tick()");
+            ops.tick = std::ptr::null_mut();
+        }
+        if !scx_utils::compat::struct_has_field("sched_ext_ops", "dump")?
+            && (ops.dump != std::ptr::null_mut() ||
+                ops.dump_cpu != std::ptr::null_mut() ||
+                ops.dump_task != std::ptr::null_mut()) {
+            scx_utils::warn!("Kernel doesn't support ops.dump*()");
+            ops.dump = std::ptr::null_mut();
+            ops.dump_cpu = std::ptr::null_mut();
+            ops.dump_task = std::ptr::null_mut();
+        }
         $skel.load().context("Failed to load BPF program")
     }
 }};
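The added checks make scx_ops_load! clear any struct_ops callbacks and fields the running kernel's sched_ext_ops does not declare (ops.tick, the ops.dump*() family, exit_dump_len), instead of letting an older kernel reject the whole struct_ops map at load time. For context, a sketch of how the schedulers in this repo drive the open/load macros inside their init path; the skeleton, ops, and uei names below follow the scx_layered hunk further down and are otherwise illustrative:

    // Fragment of a scheduler init path, not verbatim code from this repo.
    let skel_builder = LayeredSkelBuilder::default();
    let mut skel = scx_ops_open!(skel_builder, layered)?;

    // Struct_ops fields set on the open skeleton are validated against the
    // running kernel by scx_ops_load!; unsupported ones are warned about
    // and cleared there before the skeleton is loaded.
    skel.struct_ops.layered_mut().exit_dump_len = opts.exit_dump_len;

    let mut skel = scx_ops_load!(skel, layered, uei)?;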
@@ -252,8 +269,8 @@ mod tests {
     }
 
     #[test]
-    fn test_kfunc_exists() {
-        assert!(super::kfunc_exists("scx_bpf_consume").unwrap());
-        assert!(!super::kfunc_exists("NO_SUCH_KFUNC").unwrap());
+    fn test_ksym_exists() {
+        assert!(super::ksym_exists("scx_bpf_consume").unwrap());
+        assert!(!super::ksym_exists("NO_SUCH_KFUNC").unwrap());
     }
 }

View File

@@ -214,8 +214,8 @@ static void refresh_cpumasks(int idx)
         trace("LAYER[%d] now has %d cpus, seq=%llu", idx, layer->nr_cpus, layer->cpus_seq);
 }
 
-SEC("fentry/scheduler_tick")
-int scheduler_tick_fentry(const void *ctx)
+SEC("fentry")
+int BPF_PROG(sched_tick_fentry)
 {
         int idx;
View File

@@ -1328,7 +1328,7 @@ impl<'a, 'b> Scheduler<'a, 'b> {
             perf_set |= layer.perf > 0;
         }
 
-        if perf_set && !compat::kfunc_exists("scx_bpf_cpuperf_set")? {
+        if perf_set && !compat::ksym_exists("scx_bpf_cpuperf_set")? {
             warn!("cpufreq support not available, ignoring perf configurations");
         }
@@ -1345,6 +1345,17 @@ impl<'a, 'b> Scheduler<'a, 'b> {
         init_libbpf_logging(None);
         let mut skel = scx_ops_open!(skel_builder, layered)?;
 
+        // scheduler_tick() got renamed to sched_tick() during v6.10-rc.
+        let sched_tick_name = match compat::ksym_exists("sched_tick")? {
+            true => "sched_tick",
+            false => "scheduler_tick",
+        };
+        skel.progs_mut()
+            .sched_tick_fentry()
+            .set_attach_target(0, Some(sched_tick_name.into()))
+            .context("Failed to set attach target for sched_tick_fentry()")?;
+
         // Initialize skel according to @opts.
         skel.struct_ops.layered_mut().exit_dump_len = opts.exit_dump_len;
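Note that the match falls back to "scheduler_tick" without confirming that it exists either; on a kernel whose vmlinux BTF has neither name, the failure would only surface through the generic set_attach_target() error above. A small defensive variant, not part of this patch, built on the same compat helper:

    // Not in the patch: fail early with a clearer error when neither tick
    // symbol can be found in vmlinux BTF.
    if !compat::ksym_exists("sched_tick")? && !compat::ksym_exists("scheduler_tick")? {
        anyhow::bail!("neither sched_tick() nor scheduler_tick() found in vmlinux BTF");
    }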