common: Pull bpf_log2l() into helper function header

scx_lavd implemented 32- and 64-bit versions of a base-2 logarithm
function. These are now also used in rusty. To avoid code duplication,
let's pull them into a shared header.

Note that there is technically a functional change here, as we remove
the always_inline compiler directive. We instead assume that the
compiler will know best whether or not to inline these functions.

Signed-off-by: David Vernet <void@manifault.com>
David Vernet 2024-05-03 14:40:09 -05:00
parent efb97de785
commit 9bb8e9a548
3 changed files with 32 additions and 49 deletions

@@ -231,6 +231,36 @@ BPF_PROG(name, ##args)
 #define __contains(name, node) __attribute__((btf_decl_tag("contains:" #name ":" #node)))
 #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
 
+/*
+ * bpf_log2 - Compute the base 2 logarithm of a 32-bit exponential value.
+ * @v: The value for which we're computing the base 2 logarithm.
+ */
+static inline u32 bpf_log2(u32 v)
+{
+	u32 r;
+	u32 shift;
+
+	r = (v > 0xFFFF) << 4; v >>= r;
+	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
+	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
+	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
+	r |= (v >> 1);
+	return r;
+}
+
+/*
+ * bpf_log2l - Compute the base 2 logarithm of a 64-bit exponential value.
+ * @v: The value for which we're computing the base 2 logarithm.
+ */
+static inline u32 bpf_log2l(u64 v)
+{
+	u32 hi = v >> 32;
+
+	if (hi)
+		return bpf_log2(hi) + 32 + 1;
+	else
+		return bpf_log2(v) + 1;
+}
+
 void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
 void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
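
The shift-and-accumulate pattern above is the classic branchless binary search for the highest set bit (the technique popularized by the "Bit Twiddling Hacks" collection): each step tests whether the value still occupies the upper half of the remaining bit range, shifts it down if so, and ORs the shift amount into the result, so bpf_log2() yields floor(log2(v)). Note that bpf_log2l() consequently returns floor(log2(v)) + 1 for any nonzero input. A minimal userspace harness, hypothetical and not part of this commit, to sanity-check the helpers:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Userspace copies of the helpers added to the shared header above. */
static inline u32 bpf_log2(u32 v)
{
	u32 r;
	u32 shift;

	r = (v > 0xFFFF) << 4; v >>= r;			/* value above 2^16? fold down 16 bits */
	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;	/* above 2^8? fold down 8 */
	shift = (v > 0xF) << 2; v >>= shift; r |= shift;	/* above 2^4? fold down 4 */
	shift = (v > 0x3) << 1; v >>= shift; r |= shift;	/* above 2^2? fold down 2 */
	r |= (v >> 1);					/* last remaining bit */
	return r;
}

static inline u32 bpf_log2l(u64 v)
{
	u32 hi = v >> 32;

	if (hi)
		return bpf_log2(hi) + 32 + 1;
	else
		return bpf_log2(v) + 1;
}

int main(void)
{
	printf("%u\n", bpf_log2(1));		/* 0 */
	printf("%u\n", bpf_log2(1024));		/* 10 */
	printf("%u\n", bpf_log2l(1025));	/* 11: floor(log2(1025)) + 1 */
	printf("%u\n", bpf_log2l(1ULL << 40));	/* 41 */
	return 0;
}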

@@ -398,28 +398,6 @@ static u16 get_nice_prio(struct task_struct *p);
 static u64 get_task_load_ideal(struct task_struct *p);
 static void adjust_slice_boost(struct cpu_ctx *cpuc, struct task_ctx *taskc);
 
-static inline __attribute__((always_inline)) u32 bpf_log2(u32 v)
-{
-	u32 r;
-	u32 shift;
-
-	r = (v > 0xFFFF) << 4; v >>= r;
-	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
-	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
-	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
-	r |= (v >> 1);
-	return r;
-}
-
-static inline __attribute__((always_inline)) u32 bpf_log2l(u64 v)
-{
-	u32 hi = v >> 32;
-
-	if (hi)
-		return bpf_log2(hi) + 32 + 1;
-	else
-		return bpf_log2(v) + 1;
-}
 static u64 sigmoid_u64(u64 v, u64 max)
 {
 	/*

@@ -512,31 +512,6 @@ static void refresh_tune_params(void)
 	}
 }
 
-/*
- * log2 helper functions taken from scx_lavd
- */
-static inline __attribute__((always_inline)) u32 bpf_log2(u32 v)
-{
-	u32 r;
-	u32 shift;
-
-	r = (v > 0xFFFF) << 4; v >>= r;
-	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
-	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
-	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
-	r |= (v >> 1);
-	return r;
-}
-
-static inline __attribute__((always_inline)) u32 bpf_log2l(u64 v)
-{
-	u32 hi = v >> 32;
-
-	if (hi)
-		return bpf_log2(hi) + 32 + 1;
-	else
-		return bpf_log2(v) + 1;
-}
 static u64 min(u64 a, u64 b)
 {
 	return a <= b ? a : b;
@@ -667,8 +642,8 @@ static u64 task_compute_dl(struct task_struct *p, struct task_ctx *taskc,
 	/*
 	 * The above frequencies roughly follow an exponential distribution, so
-	 * borrow the bpf_log2l() implementation from lavd to linearize it to a
-	 * boost priority.
+	 * use bpf_log2l() to linearize it to a boost priority that we can then
+	 * scale to a weight factor below.
 	 */
 	lat_prio = bpf_log2l(freq_factor + 1);
 	lat_prio = min(lat_prio, DL_MAX_LAT_PRIO);
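
As a concrete illustration of that linearization: freq_factor values of 1, 1024, and 2^20 yield lat_prio values of bpf_log2l(2) = 2, bpf_log2l(1025) = 11, and bpf_log2l(2^20 + 1) = 21, so each doubling of the frequency factor raises the boost priority by roughly one step before the DL_MAX_LAT_PRIO clamp is applied.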