Add some basic selftests for the qspinlock built over BPF arena using
cond_break.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
 .../bpf/prog_tests/arena_spin_lock.c          | 68 +++++++++++++++++++
 .../selftests/bpf/progs/arena_spin_lock.c     | 54 +++++++++++++++
 2 files changed, 122 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
 create mode 100644 tools/testing/selftests/bpf/progs/arena_spin_lock.c

diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
new file mode 100644
index 000000000000..cd473d9ce764
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <network_helpers.h>
+
+struct qspinlock { int val; };
+
+#include "arena_spin_lock.skel.h"
+
+static long cpu;
+int *counter;
+
+static void *spin_lock_thread(void *arg)
+{
+	int err, prog_fd = *(u32 *)arg;
+	LIBBPF_OPTS(bpf_test_run_opts, topts,
+		.data_in = &pkt_v4,
+		.data_size_in = sizeof(pkt_v4),
+		.repeat = 1,
+	);
+	cpu_set_t cpuset;
+
+	CPU_ZERO(&cpuset);
+	CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
+	ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset), "cpu affinity");
+
+	while (READ_ONCE(*counter) <= 50) {
+		err = bpf_prog_test_run_opts(prog_fd, &topts);
+		ASSERT_OK(err, "test_run err");
+		ASSERT_EQ(topts.retval, 1, "test_run retval");
+	}
+	pthread_exit(arg);
+}
+
+void test_arena_spin_lock(void)
+{
+	struct arena_spin_lock *skel;
+	pthread_t thread_id[16];
+	int prog_fd, i, err;
+	void *ret;
+
+	skel = arena_spin_lock__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "arena_spin_lock__open_and_load"))
+		return;
+	if (skel->data->test_skip == 2) {
+		test__skip();
+		goto end;
+	}
+
+	counter = &skel->bss->counter;
+
+	prog_fd = bpf_program__fd(skel->progs.prog);
+	for (i = 0; i < 16; i++) {
+		err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
+		if (!ASSERT_OK(err, "pthread_create"))
+			goto end;
+	}
+
+	for (i = 0; i < 16; i++) {
+		if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
+			goto end;
+		if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
+			goto end;
+	}
+end:
+	arena_spin_lock__destroy(skel);
+	return;
+}
diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
new file mode 100644
index 000000000000..4f86774fa058
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
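+/*
+ * The matching prog_test runs this program from sixteen CPU-pinned threads,
+ * so the arena qspinlock below is contended while each critical section
+ * increments the global counter.
+ */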
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+#include "bpf_arena_qspinlock.h"
+
+struct {
+	__uint(type, BPF_MAP_TYPE_ARENA);
+	__uint(map_flags, BPF_F_MMAPABLE);
+	__uint(max_entries, 100); /* number of pages */
+#ifdef __TARGET_ARCH_arm64
+	__ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
+#else
+	__ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
+#endif
+} arena SEC(".maps");
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+struct qspinlock __arena *lock;
+void *ptr;
+int test_skip = 1;
+#else
+int test_skip = 2;
+#endif
+
+int counter;
+
+SEC("tc")
+int prog(void *ctx)
+{
+	bool ret = false;
+
+#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
+	ptr = &arena;
+	bpf_preempt_disable();
+	ret = !queued_spin_lock(lock);
+	if (ret) {
+		WRITE_ONCE(counter, READ_ONCE(counter) + 1);
+		bpf_repeat(BPF_MAX_LOOPS); /* stretch the critical section to force contention */
+		queued_spin_unlock(lock);
+	}
+	bpf_preempt_enable();
+#endif
+	return ret;
+}
+
+char _license[] SEC("license") = "GPL";
-- 
2.43.5