Mirror of https://github.com/sched-ext/scx.git (synced 2024-11-25 04:00:24 +00:00)
scheds/c: Fix up C schedulers
Fix up the remaining C schedulers after the recent API and header updates. Also drop the stray -p from the usage help messages of some schedulers.
Parent: 9447cb27b2
Commit: 891df57b98
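
For orientation, the hunks below convert the example schedulers from the old open-coded exit-info and struct_ops boilerplate to the newer helper macros (UEI_DEFINE/UEI_RECORD, SCX_OPS_DEFINE, __COMPAT_scx_bpf_switch_all). The following is a minimal sketch of the BPF-side pattern the diff converges on; the scheduler name "example", the op bodies, and the include path are illustrative assumptions, not code from this commit.

/* BPF side -- sketch only; "example" is a placeholder scheduler name */
#include <scx/common.bpf.h>             /* assumed location of the helper macros */

char _license[] SEC("license") = "GPL";

UEI_DEFINE(uei);                        /* was: struct user_exit_info uei; */

s32 BPF_STRUCT_OPS_SLEEPABLE(example_init)
{
        __COMPAT_scx_bpf_switch_all();  /* was: scx_bpf_switch_all(); */
        return 0;
}

void BPF_STRUCT_OPS(example_exit, struct scx_exit_info *ei)
{
        UEI_RECORD(uei, ei);            /* was: uei_record(&uei, ei); */
}

/* was: SEC(".struct_ops.link") struct sched_ext_ops example_ops = { ... }; */
SCX_OPS_DEFINE(example_ops,
               .init = (void *)example_init,
               .exit = (void *)example_exit,
               .name = "example");
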
@@ -20,7 +20,7 @@ const char help_fmt[] =
 "\n"
 "See the top-level comment in .bpf.c for more details.\n"
 "\n"
-"Usage: %s [-s SLICE_US] [-c CPU] [-p]\n"
+"Usage: %s [-s SLICE_US] [-c CPU]\n"
 "\n"
 " -s SLICE_US Override slice duration\n"
 " -c CPU Override the central CPU (default: 0)\n"

@@ -26,7 +26,7 @@ const char help_fmt[] =
 "\n"
 "See the top-level comment in .bpf.c for more details.\n"
 "\n"
-"Usage: %s [-s SLICE_US] [-i INTERVAL] [-f] [-p]\n"
+"Usage: %s [-s SLICE_US] [-i INTERVAL] [-f]\n"
 "\n"
 " -s SLICE_US Override slice duration\n"
 " -i INTERVAL Report interval\n"

@@ -59,7 +59,7 @@ u64 stats_primary_mask, stats_reserved_mask, stats_other_mask, stats_idle_mask;
 static s32 nr_reserved;

 static u64 vtime_now;
-struct user_exit_info uei;
+UEI_DEFINE(uei);

 extern unsigned long CONFIG_HZ __kconfig;

@@ -236,15 +236,6 @@ s32 BPF_STRUCT_OPS(nest_select_cpu, struct task_struct *p, s32 prev_cpu,
         struct pcpu_ctx *pcpu_ctx;
         bool direct_to_primary = false, reset_impatient = true;

-        /*
-         * Don't bother trying to find an idle core if a task is doing an
-         * exec(). We would have already tried to find a core on fork(), and if
-         * we were successful in doing so, the task will already be running on
-         * what was previously an idle core.
-         */
-        if (wake_flags & SCX_WAKE_EXEC)
-                return prev_cpu;
-
         tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
         if (!tctx)
                 return -ENOENT;

@@ -591,7 +582,7 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(nest_init)
         struct bpf_timer *timer;
         u32 key = 0;

-        scx_bpf_switch_all();
+        __COMPAT_scx_bpf_switch_all();

         err = scx_bpf_create_dsq(FALLBACK_DSQ_ID, NUMA_NO_NODE);
         if (err) {

@@ -652,11 +643,10 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(nest_init)

 void BPF_STRUCT_OPS(nest_exit, struct scx_exit_info *ei)
 {
-        uei_record(&uei, ei);
+        UEI_RECORD(uei, ei);
 }

-SEC(".struct_ops.link")
-struct sched_ext_ops nest_ops = {
+SCX_OPS_DEFINE(nest_ops,
         .select_cpu = (void *)nest_select_cpu,
         .enqueue = (void *)nest_enqueue,
         .dispatch = (void *)nest_dispatch,

@@ -667,5 +657,5 @@ struct sched_ext_ops nest_ops = {
         .init = (void *)nest_init,
         .exit = (void *)nest_exit,
         .flags = 0,
-        .name = "nest",
-};
+        .name = "nest");

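Judging from the lines removed above, SCX_OPS_DEFINE folds the SEC(".struct_ops.link") annotation and the struct sched_ext_ops definition into a single macro call. A plausible expansion is sketched below for illustration only; the real definition lives in the scx common headers and may differ.

/* Illustrative expansion only -- not copied from the scx headers. */
#define SCX_OPS_DEFINE(__name, ...)             \
        SEC(".struct_ops.link")                 \
        struct sched_ext_ops __name = {         \
                __VA_ARGS__,                    \
        };

With a shape like this, the trailing .name = "nest"); in the new code closes the initializer that the old }; used to close.
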
@@ -187,12 +187,10 @@ int main(int argc, char **argv)
                 }
         }

-        SCX_BUG_ON(scx_nest__load(skel), "Failed to load skel");
+        SCX_OPS_LOAD(skel, nest_ops, scx_nest, uei);
+        link = SCX_OPS_ATTACH(skel, nest_ops);

-        link = bpf_map__attach_struct_ops(skel->maps.nest_ops);
-        SCX_BUG_ON(!link, "Failed to attach struct_ops");
-
-        while (!exit_req && !uei_exited(&skel->bss->uei)) {
+        while (!exit_req && !UEI_EXITED(skel, uei)) {
                 u64 stats[NEST_STAT(NR)];
                 enum nest_stat_idx i;
                 enum nest_stat_group last_grp = -1;

@@ -218,7 +216,7 @@ int main(int argc, char **argv)
         }

         bpf_link__destroy(link);
-        uei_print(&skel->bss->uei);
+        UEI_REPORT(skel, uei);
         scx_nest__destroy(skel);
         return 0;
 }

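The user-space loaders all follow the same migration shown for scx_nest above: the skeleton __load() call plus SCX_BUG_ON() becomes SCX_OPS_LOAD(), bpf_map__attach_struct_ops() becomes SCX_OPS_ATTACH(), and the uei_exited()/uei_print() helpers become UEI_EXITED()/UEI_REPORT(). Below is a condensed sketch of the resulting loader flow with option parsing, error handling, and stats printing omitted; the include paths and the sleep-based wait are assumptions for illustration, not code from this commit.

#include <unistd.h>
#include <bpf/libbpf.h>
#include <scx/common.h>                 /* assumed: SCX_OPS_*() and UEI_*() helpers */
#include "scx_nest.bpf.skel.h"          /* assumed generated skeleton header name */

static volatile int exit_req;           /* set from a signal handler in the real tools */

static int run_sched(void)
{
        struct scx_nest *skel;
        struct bpf_link *link;

        skel = scx_nest__open();
        SCX_BUG_ON(!skel, "Failed to open skel");

        SCX_OPS_LOAD(skel, nest_ops, scx_nest, uei);    /* was: scx_nest__load() + SCX_BUG_ON() */
        link = SCX_OPS_ATTACH(skel, nest_ops);          /* was: bpf_map__attach_struct_ops() */

        while (!exit_req && !UEI_EXITED(skel, uei))     /* was: uei_exited(&skel->bss->uei) */
                sleep(1);                               /* per-scheduler stats reporting goes here */

        bpf_link__destroy(link);
        UEI_REPORT(skel, uei);                          /* was: uei_print(&skel->bss->uei) */
        scx_nest__destroy(skel);
        return 0;
}
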
@@ -120,8 +120,6 @@

 char _license[] SEC("license") = "GPL";

-const volatile bool switch_partial;
-
 /* !0 for veristat, set during init */
 const volatile u32 nr_cpu_ids = 1;

@@ -239,7 +237,7 @@ u64 nr_total, nr_dispatched, nr_missing, nr_kicks, nr_preemptions;
 u64 nr_exps, nr_exp_waits, nr_exp_empty;
 u64 nr_cgrp_next, nr_cgrp_coll, nr_cgrp_empty;

-struct user_exit_info uei;
+UEI_DEFINE(uei);

 static bool time_before(u64 a, u64 b)
 {

@@ -600,27 +598,17 @@ void BPF_STRUCT_OPS(pair_cgroup_exit, struct cgroup *cgrp)
         }
 }

-s32 BPF_STRUCT_OPS(pair_init)
-{
-        if (!switch_partial)
-                scx_bpf_switch_all();
-        return 0;
-}
-
 void BPF_STRUCT_OPS(pair_exit, struct scx_exit_info *ei)
 {
-        uei_record(&uei, ei);
+        UEI_RECORD(uei, ei);
 }

-SEC(".struct_ops.link")
-struct sched_ext_ops pair_ops = {
+SCX_OPS_DEFINE(pair_ops,
         .enqueue = (void *)pair_enqueue,
         .dispatch = (void *)pair_dispatch,
         .cpu_acquire = (void *)pair_cpu_acquire,
         .cpu_release = (void *)pair_cpu_release,
         .cgroup_init = (void *)pair_cgroup_init,
         .cgroup_exit = (void *)pair_cgroup_exit,
-        .init = (void *)pair_init,
         .exit = (void *)pair_exit,
-        .name = "pair",
-};
+        .name = "pair");

@@ -20,10 +20,9 @@ const char help_fmt[] =
 "\n"
 "See the top-level comment in .bpf.c for more details.\n"
 "\n"
-"Usage: %s [-S STRIDE] [-p]\n"
+"Usage: %s [-S STRIDE]\n"
 "\n"
 " -S STRIDE Override CPU pair stride (default: nr_cpus_ids / 2)\n"
-" -p Switch only tasks on SCHED_EXT policy intead of all\n"
 " -h Display this help and exit\n";

 static volatile int exit_req;

@@ -58,9 +57,6 @@ int main(int argc, char **argv)
                 case 'S':
                         stride = strtoul(optarg, NULL, 0);
                         break;
-                case 'p':
-                        skel->rodata->switch_partial = true;
-                        break;
                 default:
                         fprintf(stderr, help_fmt, basename(argv[0]));
                         return opt != 'h';

@@ -103,7 +99,7 @@ int main(int argc, char **argv)
         }
         printf("\n");

-        SCX_BUG_ON(scx_pair__load(skel), "Failed to load skel");
+        SCX_OPS_LOAD(skel, pair_ops, scx_pair, uei);

         /*
          * Populate the cgrp_q_arr map which is an array containing per-cgroup

@@ -138,10 +134,9 @@ int main(int argc, char **argv)
         /*
          * Fully initialized, attach and run.
          */
-        link = bpf_map__attach_struct_ops(skel->maps.pair_ops);
-        SCX_BUG_ON(!link, "Failed to attach struct_ops");
+        link = SCX_OPS_ATTACH(skel, pair_ops);

-        while (!exit_req && !uei_exited(&skel->bss->uei)) {
+        while (!exit_req && !UEI_EXITED(skel, uei)) {
                 printf("[SEQ %llu]\n", seq++);
                 printf(" total:%10" PRIu64 " dispatch:%10" PRIu64 " missing:%10" PRIu64 "\n",
                        skel->bss->nr_total,

@@ -163,7 +158,7 @@ int main(int argc, char **argv)
         }

         bpf_link__destroy(link);
-        uei_print(&skel->bss->uei);
+        UEI_REPORT(skel, uei);
         scx_pair__destroy(skel);
         return 0;
 }

@@ -17,7 +17,7 @@ const char help_fmt[] =
 "\n"
 "See the top-level comment in .bpf.c for more details.\n"
 "\n"
-"Usage: %s [-f] [-p]\n"
+"Usage: %s [-f]\n"
 "\n"
 " -f Use FIFO scheduling instead of weighted vtime scheduling\n"
 " -h Display this help and exit\n";

@@ -30,7 +30,6 @@

 char _license[] SEC("license") = "GPL";

-const volatile bool switch_partial;
 const volatile s32 usersched_pid;

 /* !0 for veristat, set during init */

@@ -56,7 +55,7 @@ volatile u64 nr_queued;
  */
 volatile u64 nr_scheduled;

-struct user_exit_info uei;
+UEI_DEFINE(uei);

 /*
  * The map containing tasks that are enqueued in user space from the kernel.

@@ -324,18 +323,15 @@ s32 BPF_STRUCT_OPS(userland_init)
                 return -EINVAL;
         }

-        if (!switch_partial)
-                scx_bpf_switch_all();
         return 0;
 }

 void BPF_STRUCT_OPS(userland_exit, struct scx_exit_info *ei)
 {
-        uei_record(&uei, ei);
+        UEI_RECORD(uei, ei);
 }

-SEC(".struct_ops.link")
-struct sched_ext_ops userland_ops = {
+SCX_OPS_DEFINE(userland_ops,
         .select_cpu = (void *)userland_select_cpu,
         .enqueue = (void *)userland_enqueue,
         .dispatch = (void *)userland_dispatch,

@@ -343,6 +339,6 @@ struct sched_ext_ops userland_ops = {
         .init_task = (void *)userland_init_task,
         .init = (void *)userland_init,
         .exit = (void *)userland_exit,
-        .flags = SCX_OPS_ENQ_LAST | SCX_OPS_KEEP_BUILTIN_IDLE,
-        .name = "userland",
-};
+        .flags = SCX_OPS_ENQ_LAST |
+                 SCX_OPS_KEEP_BUILTIN_IDLE,
+        .name = "userland");

@@ -38,10 +38,9 @@ const char help_fmt[] =
 "\n"
 "Try to reduce `sysctl kernel.pid_max` if this program triggers OOMs.\n"
 "\n"
-"Usage: %s [-b BATCH] [-p]\n"
+"Usage: %s [-b BATCH]\n"
 "\n"
 " -b BATCH The number of tasks to batch when dispatching (default: 8)\n"
-" -p Don't switch all, switch only tasks on SCHED_EXT policy\n"
 " -h Display this help and exit\n";

 /* Defined in UAPI */

@@ -345,7 +344,6 @@ static void bootstrap(int argc, char **argv)
         struct sched_param sched_param = {
                 .sched_priority = sched_get_priority_max(SCHED_EXT),
         };
-        bool switch_partial = false;

         err = init_tasks();
         if (err)

@@ -370,9 +368,6 @@ static void bootstrap(int argc, char **argv)
                 case 'b':
                         batch_size = strtoul(optarg, NULL, 0);
                         break;
-                case 'p':
-                        switch_partial = true;
-                        break;
                 default:
                         fprintf(stderr, help_fmt, basename(argv[0]));
                         exit(opt != 'h');

@@ -394,9 +389,8 @@ static void bootstrap(int argc, char **argv)
         assert(skel->rodata->num_possible_cpus > 0);
         skel->rodata->usersched_pid = getpid();
         assert(skel->rodata->usersched_pid > 0);
-        skel->rodata->switch_partial = switch_partial;

-        SCX_BUG_ON(scx_userland__load(skel), "Failed to load skel");
+        SCX_OPS_LOAD(skel, userland_ops, scx_userland, uei);

         enqueued_fd = bpf_map__fd(skel->maps.enqueued);
         dispatched_fd = bpf_map__fd(skel->maps.dispatched);

@@ -406,8 +400,7 @@ static void bootstrap(int argc, char **argv)
         SCX_BUG_ON(spawn_stats_thread(), "Failed to spawn stats thread");

         print_example_warning(basename(argv[0]));
-        ops_link = bpf_map__attach_struct_ops(skel->maps.userland_ops);
-        SCX_BUG_ON(!ops_link, "Failed to attach struct_ops");
+        ops_link = SCX_OPS_ATTACH(skel, userland_ops);
 }

 static void sched_main_loop(void)

@@ -440,7 +433,7 @@ int main(int argc, char **argv)

         exit_req = 1;
         bpf_link__destroy(ops_link);
-        uei_print(&skel->bss->uei);
+        UEI_REPORT(skel, uei);
         scx_userland__destroy(skel);
         return 0;
 }