On Thu, Jun 27, 2024 at 2:09 AM Ilya Leoshkevich <iii@xxxxxxxxxxxxx> wrote:
>
> Check that __sync_*() functions don't cause kernel panics when handling
> freed arena pages.
>
> Signed-off-by: Ilya Leoshkevich <iii@xxxxxxxxxxxxx>
> ---
>  .../selftests/bpf/prog_tests/arena_atomics.c | 16 +++++++
>  .../selftests/bpf/progs/arena_atomics.c      | 43 +++++++++++++++++++
>  2 files changed, 59 insertions(+)
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> index 0807a48a58ee..38eef4cc5c80 100644
> --- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> +++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c
> @@ -146,6 +146,20 @@ static void test_xchg(struct arena_atomics *skel)
>  	ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result");
>  }
>
> +static void test_uaf(struct arena_atomics *skel)
> +{
> +	LIBBPF_OPTS(bpf_test_run_opts, topts);
> +	int err, prog_fd;
> +
> +	/* No need to attach it, just run it directly */
> +	prog_fd = bpf_program__fd(skel->progs.uaf);
> +	err = bpf_prog_test_run_opts(prog_fd, &topts);
> +	if (!ASSERT_OK(err, "test_run_opts err"))
> +		return;
> +	if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
> +		return;
> +}
> +
>  void test_arena_atomics(void)
>  {
>  	struct arena_atomics *skel;
> @@ -180,6 +194,8 @@ void test_arena_atomics(void)
>  		test_cmpxchg(skel);
>  	if (test__start_subtest("xchg"))
>  		test_xchg(skel);
> +	if (test__start_subtest("uaf"))
> +		test_uaf(skel);
>
>  cleanup:
>  	arena_atomics__destroy(skel);
> diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c
> index 55f10563208d..a86c8cdf1a30 100644
> --- a/tools/testing/selftests/bpf/progs/arena_atomics.c
> +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c
> @@ -176,3 +176,46 @@ int xchg(const void *ctx)
>
>  	return 0;
>  }
> +
> +SEC("syscall")
> +int uaf(const void *ctx)
> +{
> +	if (pid != (bpf_get_current_pid_tgid() >> 32))
> +		return 0;
> +#ifdef ENABLE_ATOMICS_TESTS
> +	void __arena *page;
> +
> +	page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
> +	bpf_arena_free_pages(&arena, page, 1);
> +
> +	__sync_fetch_and_add((__u32 __arena *)page, 1);
> +	__sync_add_and_fetch((__u32 __arena *)page, 1);
> +	__sync_fetch_and_sub((__u32 __arena *)page, 1);
> +	__sync_sub_and_fetch((__u32 __arena *)page, 1);
> +	__sync_fetch_and_and((__u32 __arena *)page, 1);
> +	__sync_and_and_fetch((__u32 __arena *)page, 1);
> +	__sync_fetch_and_or((__u32 __arena *)page, 1);
> +	__sync_or_and_fetch((__u32 __arena *)page, 1);
> +	__sync_fetch_and_xor((__u32 __arena *)page, 1);
> +	__sync_xor_and_fetch((__u32 __arena *)page, 1);
> +	__sync_val_compare_and_swap((__u32 __arena *)page, 0, 1);
> +	__sync_lock_test_and_set((__u32 __arena *)page, 1);
> +
> +	__sync_fetch_and_add((__u64 __arena *)page, 1);
> +	__sync_add_and_fetch((__u64 __arena *)page, 1);
> +	__sync_fetch_and_sub((__u64 __arena *)page, 1);
> +	__sync_sub_and_fetch((__u64 __arena *)page, 1);
> +	__sync_fetch_and_and((__u64 __arena *)page, 1);
> +	__sync_and_and_fetch((__u64 __arena *)page, 1);
> +	__sync_fetch_and_or((__u64 __arena *)page, 1);
> +	__sync_or_and_fetch((__u64 __arena *)page, 1);
> +	__sync_fetch_and_xor((__u64 __arena *)page, 1);
> +	__sync_xor_and_fetch((__u64 __arena *)page, 1);
> +	__sync_val_compare_and_swap((__u64 __arena *)page, 0, 1);
> +	__sync_lock_test_and_set((__u64 __arena *)page, 1);
> +#endif

Needs to be gated to exclude x86. Not sure about arm64.
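
For illustration only, one way the gating might look, assuming the __TARGET_ARCH_* defines the selftests build already passes to clang and leaving the arm64 question open:

	/* Sketch, not a tested change: compile the UAF body only on
	 * architectures that are expected to handle a fault on a freed
	 * arena page.  __TARGET_ARCH_x86 comes from the selftests
	 * Makefile; arm64 may need the same exclusion (undecided).
	 */
	#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_x86)
		void __arena *page;

		page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
		bpf_arena_free_pages(&arena, page, 1);

		__sync_fetch_and_add((__u32 __arena *)page, 1);
		/* ... remaining __sync_*() calls unchanged ... */
	#endif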