Mirror of https://github.com/JakeHillion/scx.git, synced 2024-11-26 11:30:22 +00:00
scx_rusty: Make layer matching a global function
Layer matching currently takes a large number of BPF instructions. Moving layer matching to a global function will reduce the overall instruction count and allow for other layer matching methods such as glob.

Signed-off-by: Daniel Hodges <hodges.daniel.scott@gmail.com>
This commit is contained in:
parent 9d808ae206
commit be5213e129
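The commit message assumes familiarity with BPF global functions. As background, the sketch below (not from this commit; every identifier in it is hypothetical) contrasts an inlined static helper with a global function: the inlined helper's body is duplicated and re-verified at every call site, while a non-static __noinline function is emitted and verified once against its prototype, which is what keeps the caller's instruction count down.

/* Minimal sketch, assuming a vmlinux.h + libbpf build; all names here
 * (clamp_inline, clamp_global, probe) are made up and not part of scx. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* Inlined helper: its body is copied into every caller, so each call
 * site adds the helper's full instruction count to the program. */
static __always_inline u32 clamp_inline(u32 v, u32 lo, u32 hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

/* Global function: non-static and __noinline. It is emitted once and the
 * verifier checks it once against its prototype, so callers only pay for
 * the call itself -- the effect the commit message is after. */
__noinline u32 clamp_global(u32 v, u32 lo, u32 hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

SEC("tp_btf/sched_switch")
int BPF_PROG(probe)
{
	/* Both calls compute the same value; only the generated code size
	 * and how the verifier walks it differ. */
	bpf_printk("inline=%u global=%u",
		   clamp_inline(7, 0, 5), clamp_global(7, 0, 5));
	return 0;
}

In the diff that follows, match_layer() becomes the non-static entry point (taking a layer id and a pid instead of raw pointers, and returning 0 / -ENOENT / -EINVAL), match_one() and match_prefix() are marked __noinline, and match_prefix() gains per-CPU scratch buffers (prefix_bufs, str_bufs) so it can copy and compare its string arguments safely.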
@@ -979,7 +979,8 @@ void BPF_STRUCT_OPS(layered_dispatch, s32 cpu, struct task_struct *prev)
 	scx_bpf_consume(LO_FALLBACK_DSQ);
 }
 
-static bool match_one(struct layer_match *match, struct task_struct *p, const char *cgrp_path)
+static __noinline bool match_one(struct layer_match *match,
+				 struct task_struct *p, const char *cgrp_path)
 {
 	bool result = false;
 	const struct cred *cred;
@@ -1025,15 +1026,23 @@ static bool match_one(struct layer_match *match, struct task_struct *p, const ch
 	}
 }
 
-static bool match_layer(struct layer *layer, struct task_struct *p, const char *cgrp_path)
+int match_layer(u32 layer_id, pid_t pid, const char *cgrp_path)
 {
-	u32 nr_match_ors = layer->nr_match_ors;
+	struct task_struct *p;
+	struct layer *layer;
+	u32 nr_match_ors;
 	u64 or_idx, and_idx;
 
-	if (nr_match_ors > MAX_LAYER_MATCH_ORS) {
-		scx_bpf_error("too many ORs");
-		return false;
-	}
+	p = bpf_task_from_pid(pid);
+	if (!p)
+		return -EINVAL;
+
+	if (layer_id >= nr_layers || layer->nr_match_ors > MAX_LAYER_MATCH_ORS)
+		goto err;
+
+	layer = &layers[layer_id];
+	nr_match_ors = layer->nr_match_ors;
 
 	bpf_for(or_idx, 0, nr_match_ors) {
 		struct layer_match_ands *ands;
@@ -1041,33 +1050,39 @@ static bool match_layer(struct layer *layer, struct task_struct *p, const char *
 
 		barrier_var(or_idx);
 		if (or_idx >= MAX_LAYER_MATCH_ORS)
-			return false; /* can't happen */
+			goto err;
 
 		ands = &layer->matches[or_idx];
 
-		if (ands->nr_match_ands > NR_LAYER_MATCH_KINDS) {
-			scx_bpf_error("too many ANDs");
-			return false;
-		}
+		if (ands->nr_match_ands > NR_LAYER_MATCH_KINDS)
+			goto err;
 
 		bpf_for(and_idx, 0, ands->nr_match_ands) {
 			struct layer_match *match;
 
 			barrier_var(and_idx);
 			if (and_idx >= NR_LAYER_MATCH_KINDS)
-				return false; /* can't happen */
-			match = &ands->matches[and_idx];
+				goto err;
+
+			match = &ands->matches[and_idx];
 			if (!match_one(match, p, cgrp_path)) {
 				matched = false;
 				break;
 			}
 		}
 
-		if (matched)
-			return true;
+		if (matched) {
+			bpf_task_release(p);
+			return 0;
+		}
 	}
 
-	return false;
+	bpf_task_release(p);
+	return -ENOENT;
+
+err:
+	bpf_task_release(p);
+	return -EINVAL;
 }
 
 static void maybe_refresh_layer(struct task_struct *p, struct task_ctx *tctx)
@@ -1075,6 +1090,7 @@ static void maybe_refresh_layer(struct task_struct *p, struct task_ctx *tctx)
 	const char *cgrp_path;
 	bool matched = false;
 	u64 idx;	// XXX - int makes verifier unhappy
+	pid_t pid = p->pid;
 
 	if (!tctx->refresh_layer)
 		return;
@@ -1087,7 +1103,7 @@ static void maybe_refresh_layer(struct task_struct *p, struct task_ctx *tctx)
 	__sync_fetch_and_add(&layers[tctx->layer].nr_tasks, -1);
 
 	bpf_for(idx, 0, nr_layers) {
-		if (match_layer(&layers[idx], p, cgrp_path)) {
+		if (match_layer(idx, pid, cgrp_path) == 0) {
 			matched = true;
 			break;
 		}
@@ -8,6 +8,20 @@ struct {
 	__uint(max_entries, 1);
 } cgrp_path_bufs SEC(".maps");
 
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(key_size, sizeof(u32));
+	__uint(value_size, MAX_PATH);
+	__uint(max_entries, 1);
+} prefix_bufs SEC(".maps");
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+	__uint(key_size, sizeof(u32));
+	__uint(value_size, MAX_PATH);
+	__uint(max_entries, 1);
+} str_bufs SEC(".maps");
+
 static char *format_cgrp_path(struct cgroup *cgrp)
 {
 	u32 zero = 0;
@@ -54,14 +68,44 @@ static char *format_cgrp_path(struct cgroup *cgrp)
 	return path;
 }
 
-static inline bool match_prefix(const char *prefix, const char *str, u32 max_len)
+bool __noinline match_prefix(const char *prefix, const char *str, u32 max_len)
 {
-	int c;
+	u32 c, zero = 0;
+	int len;
+
+	if (!prefix || !str || max_len > MAX_PATH) {
+		scx_bpf_error("invalid args: %s %s %u",
+			      prefix, str, max_len);
+		return false;
+	}
+
+	char *pre_buf = bpf_map_lookup_elem(&prefix_bufs, &zero);
+	char *str_buf = bpf_map_lookup_elem(&str_bufs, &zero);
+	if (!pre_buf || !str_buf) {
+		scx_bpf_error("failed to look up buf");
+		return false;
+	}
+
+	len = bpf_probe_read_kernel_str(pre_buf, MAX_PATH, prefix);
+	if (len < 0) {
+		scx_bpf_error("failed to read prefix");
+		return false;
+	}
+
+	len = bpf_probe_read_kernel_str(str_buf, MAX_PATH, str);
+	if (len < 0) {
+		scx_bpf_error("failed to read str");
+		return false;
+	}
 
 	bpf_for(c, 0, max_len) {
-		if (prefix[c] == '\0')
+		if (c > len) {
+			scx_bpf_error("invalid length");
+			return false; /* appease the verifier */
+		}
+		if (pre_buf[c] == '\0')
 			return true;
-		if (str[c] != prefix[c])
+		if (str_buf[c] != pre_buf[c])
 			return false;
 	}
 	return false;