scx_lavd: Add READ_ONCE()/WRITE_ONCE() macros

To prevent the compiler from merging or refetching load/store
operations, and to avoid unwanted reordering, take the implementation
of READ_ONCE()/WRITE_ONCE() from the kernel sources under
"include/asm-generic/rwonce.h".

Use WRITE_ONCE() in flip_sys_cpu_util() so the compiler cannot tear,
merge, or re-issue the store that flips the index bit, and use
READ_ONCE() in get_sys_cpu_util_cur()/get_sys_cpu_util_next() so each
check of __sys_cpu_util_idx performs a single fresh load. With every
access wrapped this way, the volatile qualifier on __sys_cpu_util_idx
is no longer needed and is dropped.
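As a hedged sketch (the standalone 'idx' below is hypothetical,
mirroring flip_sys_cpu_util()), the difference between the plain store
and WRITE_ONCE() is:

	static int idx;

	static void flip_plain(void)
	{
		/* The compiler may tear, merge, or re-issue this store. */
		idx ^= 0x1;
	}

	static void flip_once(void)
	{
		/* Exactly one untorn store of the new value is emitted. */
		WRITE_ONCE(idx, idx ^ 0x1);
	}

The read of __sys_cpu_util_idx inside the flip stays plain, which is
fine as long as only one context performs the flip.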

Signed-off-by: I Hsin Cheng <richard120310@gmail.com>
I Hsin Cheng 2024-05-27 15:07:29 +08:00
parent 6b53adb5d3
commit 0921fde1f1
2 changed files with 59 additions and 4 deletions


@@ -278,6 +278,61 @@ static inline u32 bpf_log2l(u64 v)
 #define unlikely(x) __builtin_expect(!!(x), 0)
 #define __maybe_unused __attribute__((__unused__))
 
+/*
+ * The following functions are taken from the kernel sources under
+ * include/asm-generic/rwonce.h. They prevent the compiler from
+ * refetching reads or writes and forbid it from reordering
+ * successive reads or writes.
+ */
+typedef __u8  __attribute__((__may_alias__)) __u8_alias_t;
+typedef __u16 __attribute__((__may_alias__)) __u16_alias_t;
+typedef __u32 __attribute__((__may_alias__)) __u32_alias_t;
+typedef __u64 __attribute__((__may_alias__)) __u64_alias_t;
+
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(__u8_alias_t *) res = *(volatile __u8_alias_t *) p; break;
+	case 2: *(__u16_alias_t *) res = *(volatile __u16_alias_t *) p; break;
+	case 4: *(__u32_alias_t *) res = *(volatile __u32_alias_t *) p; break;
+	case 8: *(__u64_alias_t *) res = *(volatile __u64_alias_t *) p; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)res, (const void *)p, size);
+		barrier();
+	}
+}
+
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
+{
+	switch (size) {
+	case 1: *(volatile __u8_alias_t *) p = *(__u8_alias_t *) res; break;
+	case 2: *(volatile __u16_alias_t *) p = *(__u16_alias_t *) res; break;
+	case 4: *(volatile __u32_alias_t *) p = *(__u32_alias_t *) res; break;
+	case 8: *(volatile __u64_alias_t *) p = *(__u64_alias_t *) res; break;
+	default:
+		barrier();
+		__builtin_memcpy((void *)p, (const void *)res, size);
+		barrier();
+	}
+}
+
+#define READ_ONCE(x)						\
+({								\
+	union { typeof(x) __val; char __c[1]; } __u =		\
+		{ .__c = { 0 } };				\
+	__read_once_size(&(x), __u.__c, sizeof(x));		\
+	__u.__val;						\
+})
+
+#define WRITE_ONCE(x, val)					\
+({								\
+	union { typeof(x) __val; char __c[1]; } __u =		\
+		{ .__val = (val) };				\
+	__write_once_size(&(x), __u.__c, sizeof(x));		\
+	__u.__val;						\
+})
+
 void *bpf_obj_new_impl(__u64 local_type_id, void *meta) __ksym;
 void bpf_obj_drop_impl(void *kptr, void *meta) __ksym;
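Worth noting about the copied implementation: the __may_alias__
typedefs plus the casts to volatile pointers make each access compile
to a single load or store of exactly sizeof(x) bytes without violating
strict-aliasing rules, and sizes other than 1/2/4/8 fall back to a
memcpy bracketed by compiler barriers. A usage sketch ('cpuc->util' is
a hypothetical field, for illustration only):

	u64 cur = READ_ONCE(cpuc->util);	/* exactly one 8-byte load */
	WRITE_ONCE(cpuc->util, cur / 2);	/* exactly one 8-byte store */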


@@ -151,7 +151,7 @@ char _license[] SEC("license") = "GPL";
 volatile u64 nr_cpus_onln;
 
 static struct sys_cpu_util __sys_cpu_util[2];
-static volatile int __sys_cpu_util_idx;
+static int __sys_cpu_util_idx;
 
 const volatile bool no_freq_scaling;
 const volatile u8 verbose;
@@ -506,21 +506,21 @@ static struct cpu_ctx *get_cpu_ctx_id(s32 cpu_id)
 static struct sys_cpu_util *get_sys_cpu_util_cur(void)
 {
-	if (__sys_cpu_util_idx == 0)
+	if (READ_ONCE(__sys_cpu_util_idx) == 0)
 		return &__sys_cpu_util[0];
 	return &__sys_cpu_util[1];
 }
 
 static struct sys_cpu_util *get_sys_cpu_util_next(void)
 {
-	if (__sys_cpu_util_idx == 0)
+	if (READ_ONCE(__sys_cpu_util_idx) == 0)
 		return &__sys_cpu_util[1];
 	return &__sys_cpu_util[0];
 }
 
 static void flip_sys_cpu_util(void)
 {
-	__sys_cpu_util_idx ^= 0x1;
+	WRITE_ONCE(__sys_cpu_util_idx, __sys_cpu_util_idx ^ 0x1);
 }
 
 static __attribute__((always_inline))