On Wed, May 11, 2022 at 2:39 AM Feng zhou <zhoufeng.zf@xxxxxxxxxxxxx> wrote:
>
> From: Feng Zhou <zhoufeng.zf@xxxxxxxxxxxxx>
>
> test_progs:
> Tests new ebpf helpers bpf_map_lookup_percpu_elem.
>
> Signed-off-by: Feng Zhou <zhoufeng.zf@xxxxxxxxxxxxx>
> ---
>  .../bpf/prog_tests/map_lookup_percpu_elem.c   | 46 ++++++++++++++++
>  .../bpf/progs/test_map_lookup_percpu_elem.c   | 54 +++++++++++++++++++
>  2 files changed, 100 insertions(+)
>  create mode 100644 tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
>  create mode 100644 tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
>
> diff --git a/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
> new file mode 100644
> index 000000000000..58b24c2112b0
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/map_lookup_percpu_elem.c
> @@ -0,0 +1,46 @@
> +// SPDX-License-Identifier: GPL-2.0
> +// Copyright (c) 2022 Bytedance

/* */ instead of //

> +
> +#include <test_progs.h>
> +
> +#include "test_map_lookup_percpu_elem.skel.h"
> +
> +#define TEST_VALUE 1
> +
> +void test_map_lookup_percpu_elem(void)
> +{
> +	struct test_map_lookup_percpu_elem *skel;
> +	int key = 0, ret;
> +	int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);

I think this is actually wrong and will break selftests on systems
with offline CPUs. Please use libbpf_num_possible_cpus() instead.

> +	int *buf;
> +
> +	buf = (int *)malloc(nr_cpus*sizeof(int));
> +	if (!ASSERT_OK_PTR(buf, "malloc"))
> +		return;
> +	memset(buf, 0, nr_cpus*sizeof(int));

this is wrong, the kernel expects roundup(sz, 8) bytes for each CPU,
while you have just 4 bytes per element

please also have spaces around the multiplication operator here and above

> +	buf[0] = TEST_VALUE;
> +
> +	skel = test_map_lookup_percpu_elem__open_and_load();
> +	if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open_and_load"))
> +		return;

buf is leaking here

> +	ret = test_map_lookup_percpu_elem__attach(skel);
> +	ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach");
> +
> +	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
> +	ASSERT_OK(ret, "percpu_array_map update");
> +
> +	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0);
> +	ASSERT_OK(ret, "percpu_hash_map update");
> +
> +	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0);
> +	ASSERT_OK(ret, "percpu_lru_hash_map update");
> +
> +	syscall(__NR_getuid);
> +
> +	ret = skel->bss->percpu_array_elem_val == TEST_VALUE &&
> +	      skel->bss->percpu_hash_elem_val == TEST_VALUE &&
> +	      skel->bss->percpu_lru_hash_elem_val == TEST_VALUE;
> +	ASSERT_OK(!ret, "bpf_map_lookup_percpu_elem success");

this would be better done as three separate ASSERT_EQ() checks, combining
them into an opaque true/false isn't helpful if something breaks
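Just to illustrate the points above, here is a rough, untested sketch of how
I'd restructure the user space part. It keeps the map, skeleton, and global
names from your patch; the hard-coded 8-byte-per-CPU slot size stands in for
roundup(value_sz, 8):

// SPDX-License-Identifier: GPL-2.0
/* Untested sketch only; names are taken from the patch as posted. */
#include <test_progs.h>

#include "test_map_lookup_percpu_elem.skel.h"

#define TEST_VALUE 1

void test_map_lookup_percpu_elem(void)
{
	struct test_map_lookup_percpu_elem *skel;
	int nr_cpus = libbpf_num_possible_cpus();
	size_t value_sz = 8;	/* roundup(sizeof(__u32), 8), per-CPU slot size */
	int key = 0, ret;
	void *buf;

	buf = calloc(nr_cpus, value_sz);
	if (!ASSERT_OK_PTR(buf, "calloc"))
		return;
	/* value for CPU 0; all other CPUs stay 0 */
	*(__u32 *)buf = TEST_VALUE;

	skel = test_map_lookup_percpu_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_map_lookup_percpu_elem__open_and_load"))
		goto out;

	ret = test_map_lookup_percpu_elem__attach(skel);
	if (!ASSERT_OK(ret, "test_map_lookup_percpu_elem__attach"))
		goto out_destroy;

	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_array_map), &key, buf, 0);
	ASSERT_OK(ret, "percpu_array_map update");
	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_hash_map), &key, buf, 0);
	ASSERT_OK(ret, "percpu_hash_map update");
	ret = bpf_map_update_elem(bpf_map__fd(skel->maps.percpu_lru_hash_map), &key, buf, 0);
	ASSERT_OK(ret, "percpu_lru_hash_map update");

	syscall(__NR_getuid);

	/* separate asserts make it obvious which map broke */
	ASSERT_EQ(skel->bss->percpu_array_elem_val, TEST_VALUE, "percpu_array_elem_val");
	ASSERT_EQ(skel->bss->percpu_hash_elem_val, TEST_VALUE, "percpu_hash_elem_val");
	ASSERT_EQ(skel->bss->percpu_lru_hash_elem_val, TEST_VALUE, "percpu_lru_hash_elem_val");

out_destroy:
	test_map_lookup_percpu_elem__destroy(skel);
out:
	free(buf);
}

With separate ASSERT_EQ() checks a failure report names the exact map that
regressed, and buf is freed on every exit path.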
> +
> +	test_map_lookup_percpu_elem__destroy(skel);
> +}
> diff --git a/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
> new file mode 100644
> index 000000000000..5d4ef86cbf48
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/test_map_lookup_percpu_elem.c
> @@ -0,0 +1,54 @@
> +// SPDX-License-Identifier: GPL-2.0
> +// Copyright (c) 2022 Bytedance

/* */ instead of //

> +
> +#include "vmlinux.h"
> +#include <bpf/bpf_helpers.h>
> +
> +int percpu_array_elem_val = 0;
> +int percpu_hash_elem_val = 0;
> +int percpu_lru_hash_elem_val = 0;
> +
> +struct {
> +	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
> +	__uint(max_entries, 1);
> +	__type(key, __u32);
> +	__type(value, __u32);
> +} percpu_array_map SEC(".maps");
> +
> +struct {
> +	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
> +	__uint(max_entries, 1);
> +	__type(key, __u32);
> +	__type(value, __u32);
> +} percpu_hash_map SEC(".maps");
> +
> +struct {
> +	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
> +	__uint(max_entries, 1);
> +	__type(key, __u32);
> +	__type(value, __u32);
> +} percpu_lru_hash_map SEC(".maps");
> +
> +SEC("tp/syscalls/sys_enter_getuid")
> +int sysenter_getuid(const void *ctx)
> +{
> +	__u32 key = 0;
> +	__u32 cpu = 0;
> +	__u32 *value;
> +
> +	value = bpf_map_lookup_percpu_elem(&percpu_array_map, &key, cpu);
> +	if (value)
> +		percpu_array_elem_val = *value;
> +
> +	value = bpf_map_lookup_percpu_elem(&percpu_hash_map, &key, cpu);
> +	if (value)
> +		percpu_hash_elem_val = *value;
> +
> +	value = bpf_map_lookup_percpu_elem(&percpu_lru_hash_map, &key, cpu);
> +	if (value)
> +		percpu_lru_hash_elem_val = *value;
> +

if the test happens to run on CPU 0 then the test doesn't really test
much. It would be more interesting to have a bpf_loop() iteration that
would fetch values on each possible CPU instead and do something with it.

> +	return 0;
> +}
> +
> +char _license[] SEC("license") = "GPL";
> --
> 2.20.1
>
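Regarding the bpf_loop() suggestion above, here is a rough, untested sketch of
what the BPF side could look like. Only the percpu array map is shown, and
check_percpu_elem, nr_cpus, and percpu_array_sum are made-up names for
illustration, not part of your patch:

// SPDX-License-Identifier: GPL-2.0
/* Untested sketch: iterate all possible CPUs with bpf_loop() instead of
 * reading only CPU 0.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

const volatile __u32 nr_cpus = 1;	/* userspace sets this before load */

__u64 percpu_array_sum = 0;

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} percpu_array_map SEC(".maps");

/* bpf_loop() callback, called once per CPU index */
static int check_percpu_elem(__u32 cpu, void *ctx)
{
	__u32 key = 0;
	__u32 *value;

	value = bpf_map_lookup_percpu_elem(&percpu_array_map, &key, cpu);
	if (value)
		percpu_array_sum += *value;
	return 0;	/* keep iterating */
}

SEC("tp/syscalls/sys_enter_getuid")
int sysenter_getuid(const void *ctx)
{
	percpu_array_sum = 0;
	bpf_loop(nr_cpus, check_percpu_elem, NULL, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";

User space would then seed a distinct value for every CPU, set nr_cpus from
libbpf_num_possible_cpus() before loading, and check the accumulated sum, so
the test exercises the helper no matter which CPU the tracepoint fires on.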