Re: [PATCH bpf-next v3 3/3] selftests/bpf: Add tests for arena spin lock

On Wed, 5 Mar 2025 at 03:04, Alexei Starovoitov
<alexei.starovoitov@xxxxxxxxx> wrote:
>
> On Tue, Mar 4, 2025 at 5:18 PM Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx> wrote:
> >
> > Add some basic selftests for qspinlock built over BPF arena using
> > cond_break_label macro.
> >
> > Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
> > ---
> >  .../bpf/prog_tests/arena_spin_lock.c          | 102 ++++++++++++++++++
> >  .../selftests/bpf/progs/arena_spin_lock.c     |  51 +++++++++
> >  2 files changed, 153 insertions(+)
> >  create mode 100644 tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> >  create mode 100644 tools/testing/selftests/bpf/progs/arena_spin_lock.c
> >
> > diff --git a/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> > new file mode 100644
> > index 000000000000..2cc078ed1ddb
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/prog_tests/arena_spin_lock.c
> > @@ -0,0 +1,102 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
> > +#include <test_progs.h>
> > +#include <network_helpers.h>
> > +#include <sys/sysinfo.h>
> > +
> > +struct qspinlock { int val; };
> > +typedef struct qspinlock arena_spinlock_t;
> > +
> > +struct arena_qnode {
> > +       unsigned long next;
> > +       int count;
> > +       int locked;
> > +};
> > +
> > +#include "arena_spin_lock.skel.h"
> > +
> > +static long cpu;
> > +int *counter;
> > +
> > +static void *spin_lock_thread(void *arg)
> > +{
> > +       int err, prog_fd = *(u32 *)arg;
> > +       LIBBPF_OPTS(bpf_test_run_opts, topts,
> > +               .data_in = &pkt_v4,
> > +               .data_size_in = sizeof(pkt_v4),
> > +               .repeat = 1,
> > +       );
>
> Why bother with 'tc' prog type?
> Pick syscall type, and above will be shorter:
> LIBBPF_OPTS(bpf_test_run_opts, opts);
>

Ack.
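
For v4 I'll switch the program over to the syscall prog type, roughly
(untested sketch):

        SEC("syscall")
        int prog(void *ctx)
        {
                /* ... body as above ... */
        }

and the userspace side then shrinks to:

        LIBBPF_OPTS(bpf_test_run_opts, topts);

        err = bpf_prog_test_run_opts(prog_fd, &topts);

which also lets me drop the pkt_v4 / network_helpers.h setup.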

> > +       cpu_set_t cpuset;
> > +
> > +       CPU_ZERO(&cpuset);
> > +       CPU_SET(__sync_fetch_and_add(&cpu, 1), &cpuset);
> > +       ASSERT_OK(pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset), "cpu affinity");
> > +
> > +       while (*READ_ONCE(counter) <= 1000) {
>
> READ_ONCE(*counter) ?
>
> but why add this user->kernel switch overhead.
> Use .repeat = 1000
> one bpf_prog_test_run_opts()
> and check at the end that *counter == 1000 ?

Ok.
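
So each thread does a single test_run call, roughly (untested sketch,
keeping the 16 threads x 1000 iterations of the current test):

        LIBBPF_OPTS(bpf_test_run_opts, topts, .repeat = 1000);

        err = bpf_prog_test_run_opts(prog_fd, &topts);
        ASSERT_OK(err, "test_run err");
        ASSERT_EQ((int)topts.retval, 0, "test_run retval");
        pthread_exit(arg);

and test_arena_spin_lock_size() then checks
ASSERT_EQ(*counter, 16 * 1000, "counter") after joining all threads.
One wrinkle: IIRC test_run rejects a nonzero repeat for syscall progs,
so I'll double check that this composes with the prog type switch above.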

>
> > +               err = bpf_prog_test_run_opts(prog_fd, &topts);
> > +               if (!ASSERT_OK(err, "test_run err"))
> > +                       break;
> > +               if (!ASSERT_EQ((int)topts.retval, 0, "test_run retval"))
> > +                       break;
> > +       }
> > +       pthread_exit(arg);
> > +}
> > +
> > +static void test_arena_spin_lock_size(int size)
> > +{
> > +       LIBBPF_OPTS(bpf_test_run_opts, topts);
> > +       struct arena_spin_lock *skel;
> > +       pthread_t thread_id[16];
> > +       int prog_fd, i, err;
> > +       void *ret;
> > +
> > +       if (get_nprocs() < 2) {
> > +               test__skip();
> > +               return;
> > +       }
> > +
> > +       skel = arena_spin_lock__open_and_load();
> > +       if (!ASSERT_OK_PTR(skel, "arena_spin_lock__open_and_load"))
> > +               return;
> > +       if (skel->data->test_skip == 2) {
> > +               test__skip();
> > +               goto end;
> > +       }
> > +       counter = &skel->bss->counter;
> > +       skel->bss->cs_count = size;
> > +
> > +       prog_fd = bpf_program__fd(skel->progs.prog);
> > +       for (i = 0; i < 16; i++) {
> > +               err = pthread_create(&thread_id[i], NULL, &spin_lock_thread, &prog_fd);
> > +               if (!ASSERT_OK(err, "pthread_create"))
> > +                       goto end;
> > +       }
> > +
> > +       for (i = 0; i < 16; i++) {
> > +               if (!ASSERT_OK(pthread_join(thread_id[i], &ret), "pthread_join"))
> > +                       goto end;
> > +               if (!ASSERT_EQ(ret, &prog_fd, "ret == prog_fd"))
> > +                       goto end;
> > +       }
> > +end:
> > +       arena_spin_lock__destroy(skel);
> > +       return;
> > +}
> > +
> > +void test_arena_spin_lock(void)
> > +{
> > +       if (test__start_subtest("arena_spin_lock_1"))
> > +               test_arena_spin_lock_size(1);
> > +       cpu = 0;
> > +       if (test__start_subtest("arena_spin_lock_1000"))
> > +               test_arena_spin_lock_size(1000);
> > +       cpu = 0;
> > +       if (test__start_subtest("arena_spin_lock_10000"))
> > +               test_arena_spin_lock_size(10000);
> > +       cpu = 0;
> > +       if (test__start_subtest("arena_spin_lock_100000"))
> > +               test_arena_spin_lock_size(100000);
> > +       cpu = 0;
> > +       if (test__start_subtest("arena_spin_lock_500000"))
> > +               test_arena_spin_lock_size(500000);
>
> Do 10k and 500k make a difference?
> I suspect 1, 1k and 100k would cover the interesting range.

They do make a difference inside a VM, but not on bare metal.
I can stick to three sizes though.
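
The runner would then become something like this (sketch, keeping the
1 / 1000 / 100000 sizes you suggest):

        void test_arena_spin_lock(void)
        {
                if (test__start_subtest("arena_spin_lock_1"))
                        test_arena_spin_lock_size(1);
                cpu = 0;
                if (test__start_subtest("arena_spin_lock_1000"))
                        test_arena_spin_lock_size(1000);
                cpu = 0;
                if (test__start_subtest("arena_spin_lock_100000"))
                        test_arena_spin_lock_size(100000);
        }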

>
> > +}
> > diff --git a/tools/testing/selftests/bpf/progs/arena_spin_lock.c b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
> > new file mode 100644
> > index 000000000000..3e8ce807e028
> > --- /dev/null
> > +++ b/tools/testing/selftests/bpf/progs/arena_spin_lock.c
> > @@ -0,0 +1,51 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
> > +#include <vmlinux.h>
> > +#include <bpf/bpf_tracing.h>
> > +#include <bpf/bpf_helpers.h>
> > +#include "bpf_misc.h"
> > +#include "bpf_arena_spin_lock.h"
> > +
> > +struct {
> > +       __uint(type, BPF_MAP_TYPE_ARENA);
> > +       __uint(map_flags, BPF_F_MMAPABLE);
> > +       __uint(max_entries, 100); /* number of pages */
> > +#ifdef __TARGET_ARCH_arm64
> > +       __ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
> > +#else
> > +       __ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
> > +#endif
> > +} arena SEC(".maps");
> > +
> > +int cs_count;
> > +
> > +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
> > +arena_spinlock_t __arena lock;
> > +void *ptr;
> > +int test_skip = 1;
> > +#else
> > +int test_skip = 2;
> > +#endif
> > +
> > +int counter;
> > +
> > +SEC("tc")
> > +int prog(void *ctx)
> > +{
> > +       int ret = -2;
> > +
> > +#if defined(ENABLE_ATOMICS_TESTS) && defined(__BPF_FEATURE_ADDR_SPACE_CAST)
> > +       unsigned long flags;
> > +
> > +       ptr = &arena;
>
> Is it really necessary?

Probably a remnant from previous versions. Sometimes, if you don't do
this, the verifier rejects the program with an error saying
addr_space_cast cannot be used in a program with no associated arena.

Presumably that happens when it never sees a reference to the arena map
anywhere in the program, and assigning ptr = &arena is one way to create
such a reference. But in this case I dropped it and it still works,
probably because the __arena lock already pulls the arena map into the
program.

>
> > +       if ((ret = arena_spin_lock_irqsave(&lock, flags)))
> > +               return ret;
> > +       WRITE_ONCE(counter, READ_ONCE(counter) + 1);
> > +       bpf_repeat(cs_count);
> > +       ret = 0;
> > +       arena_spin_unlock_irqrestore(&lock, flags);
> > +#endif
> > +       return ret;
> > +}
> > +
> > +char _license[] SEC("license") = "GPL";
> > --
> > 2.47.1
> >
