On Tue, May 7, 2024 at 6:39 AM Yafang Shao <laoar.shao@xxxxxxxxx> wrote:
>
> On Tue, May 7, 2024 at 11:42 AM Andrii Nakryiko
> <andrii.nakryiko@xxxxxxxxx> wrote:
> >
> > On Sun, May 5, 2024 at 8:35 PM Yafang Shao <laoar.shao@xxxxxxxxx> wrote:
> > >
> > > Add test cases for the bits iter:
> > > - positive cases
> > >   - bit mask smaller than 8 bytes
> > >   - a typical case of having an 8-byte bit mask
> > >   - another typical case where the bit mask is > 8 bytes
> > >   - the index of a set bit
> > >
> > > - negative cases
> > >   - bpf_iter_bits_destroy() is required after calling
> > >     bpf_iter_bits_new()
> > >   - bpf_iter_bits_destroy() can only destroy an initialized iter
> > >   - bpf_iter_bits_next() must use an initialized iter
> > >
> > > Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
> > > ---
> > >  .../selftests/bpf/prog_tests/verifier.c      |   2 +
> > >  .../selftests/bpf/progs/verifier_bits_iter.c | 160 ++++++++++++++++++
> > >  2 files changed, 162 insertions(+)
> > >  create mode 100644 tools/testing/selftests/bpf/progs/verifier_bits_iter.c
> > >
> > > diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
> > > index c4f9f306646e..7e04ecaaa20a 100644
> > > --- a/tools/testing/selftests/bpf/prog_tests/verifier.c
> > > +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
> > > @@ -84,6 +84,7 @@
> > >  #include "verifier_xadd.skel.h"
> > >  #include "verifier_xdp.skel.h"
> > >  #include "verifier_xdp_direct_packet_access.skel.h"
> > > +#include "verifier_bits_iter.skel.h"
> > >
> > >  #define MAX_ENTRIES 11
> > >
> > > @@ -198,6 +199,7 @@ void test_verifier_var_off(void) { RUN(verifier_var_off); }
> > >  void test_verifier_xadd(void) { RUN(verifier_xadd); }
> > >  void test_verifier_xdp(void) { RUN(verifier_xdp); }
> > >  void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); }
> > > +void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
> > >
> > >  static int init_test_val_map(struct bpf_object *obj, char *map_name)
> > >  {
> > > diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
> > > new file mode 100644
> > > index 000000000000..2f7b62b25638
> > > --- /dev/null
> > > +++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
> > > @@ -0,0 +1,160 @@
> > > +// SPDX-License-Identifier: GPL-2.0-only
> > > +/* Copyright (c) 2024 Yafang Shao <laoar.shao@xxxxxxxxx> */
> > > +
> > > +#include "vmlinux.h"
> > > +#include <bpf/bpf_helpers.h>
> > > +#include <bpf/bpf_tracing.h>
> > > +
> > > +#include "bpf_misc.h"
> > > +#include "task_kfunc_common.h"
> > > +
> > > +char _license[] SEC("license") = "GPL";
> > > +
> > > +int bpf_iter_bits_new(struct bpf_iter_bits *it, const void *unsafe_ptr__ign,
> > > +                     u32 nr_bits) __ksym __weak;
> > > +int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak;
> > > +void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak;
> > > +
> > > +SEC("iter.s/cgroup")
> > > +__description("bits iter without destroy")
> > > +__failure __msg("Unreleased reference")
> > > +int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp)
> > > +{
> > > +        struct bpf_iter_bits it;
> > > +        struct task_struct *p;
> > > +
> > > +        p = bpf_task_from_pid(1);
> > > +        if (!p)
> > > +                return 1;
> > > +
> > > +        bpf_iter_bits_new(&it, p->cpus_ptr, 8192);
> > > +
> > > +        bpf_iter_bits_next(&it);
> > > +        bpf_task_release(p);
> > > +        return 0;
> > > +}
> > > +
> > > +SEC("iter/cgroup")
+__description("bits iter with uninitialized iter in ->next()") > > > +__failure __msg("expected an initialized iter_bits as arg #1") > > > +int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) > > > +{ > > > + struct bpf_iter_bits *it = NULL; > > > + > > > + bpf_iter_bits_next(it); > > > + return 0; > > > +} > > > + > > > +SEC("iter/cgroup") > > > +__description("bits iter with uninitialized iter in ->destroy()") > > > +__failure __msg("expected an initialized iter_bits as arg #1") > > > +int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) > > > +{ > > > + struct bpf_iter_bits it = {}; > > > + > > > + bpf_iter_bits_destroy(&it); > > > + return 0; > > > +} > > > + > > > +SEC("syscall") > > > +__description("bits copy 32") > > > +__success __retval(10) > > > +int bits_copy32(void) > > > +{ > > > + /* 21 bits: --------------------- */ > > > + u32 data = 0b11111101111101111100001000100101U; > > > > if you define this bit mask as an array of bytes, then you won't have > > to handle big-endian in the tests at all > > This test case provides a clear example of iterating over data of type > u32, offering valuable guidance for users who need to perform such > iterations. > > > > > > > > + int nr = 0, offset = 0; > > > + int *bit; > > > + > > > +#if defined(__TARGET_ARCH_s390) > > > + offset = sizeof(u32) - (21 + 7) / 8; > > > +#endif > > > + bpf_for_each(bits, bit, ((char *)&data) + offset, 21) > > > + nr++; > > > + return nr; > > > +} > > > + > > > +SEC("syscall") > > > +__description("bits copy 64") > > > +__success __retval(18) > > > +int bits_copy64(void) > > > +{ > > > + /* 34 bits: ~-------- */ > > > + u64 data = 0xffffefdf0f0f0f0fUL; > > > + int nr = 0, offset = 0; > > > + int *bit; > > > + > > > +#if defined(__TARGET_ARCH_s390) > > > + offset = sizeof(u64) - (34 + 7) / 8; > > > +#endif > > > + > > > + bpf_for_each(bits, bit, ((char *)&data) + offset, 34) > > > > see above about byte array, but if we define different (not as byte > > array but long[]), it would be cleaner to have > > This test case demonstrates how to iterate over data of type u64. > > > > > #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ > > u64 data = 0x......UL; > > #else > > u64 data = 0x......UL; > > #endif > > looks good. > Please hold off on sending a new revision until we figure out what the contract should be. Because I feel like it's a (relatively) big decision whether a bit mask is treated as an array of bytes or as an array of longs. For little-endian it makes no difference, but for big-endian it's a big difference and has usability and performance implications. 
> >
> > where we'd hard-code bit masks in proper endianness in one place and
> > then just do clean `bpf_for_each(bits, bit, &data, <len>) {}` calls
> >
> > > +                nr++;
> > > +        return nr;
> > > +}
> > > +
> > > +SEC("syscall")
> > > +__description("bits memalloc long-aligned")
> > > +__success __retval(32) /* 16 * 2 */
> > > +int bits_memalloc(void)
> > > +{
> > > +        char data[16];
> > > +        int nr = 0;
> > > +        int *bit;
> > > +
> > > +        __builtin_memset(&data, 0x48, sizeof(data));
> > > +        bpf_for_each(bits, bit, &data, sizeof(data) * 8)
> > > +                nr++;
> > > +        return nr;
> > > +}
> > > +
> > > +SEC("syscall")
> > > +__description("bits memalloc non-long-aligned")
> > > +__success __retval(85) /* 17 * 5 */
> > > +int bits_memalloc_non_aligned(void)
> > > +{
> > > +        char data[17];
> > > +        int nr = 0;
> > > +        int *bit;
> > > +
> > > +        __builtin_memset(&data, 0x1f, sizeof(data));
> > > +        bpf_for_each(bits, bit, &data, sizeof(data) * 8)
> > > +                nr++;
> > > +        return nr;
> > > +}
> > > +
> > > +SEC("syscall")
> > > +__description("bits memalloc non-aligned-bits")
> > > +__success __retval(27) /* 8 * 3 + 3 */
> > > +int bits_memalloc_non_aligned_bits(void)
> > > +{
> > > +        char data[16];
> > > +        int nr = 0;
> > > +        int *bit;
> > > +
> > > +        __builtin_memset(&data, 0x31, sizeof(data));
> > > +        /* Different from all other bytes */
> > > +        data[8] = 0xf7;
> > > +
> > > +        bpf_for_each(bits, bit, &data, 68)
> > > +                nr++;
> > > +        return nr;
> > > +}
> > > +
> > > +
> > > +SEC("syscall")
> > > +__description("bit index")
> > > +__success __retval(8)
> > > +int bit_index(void)
> > > +{
> > > +        u64 data = 0x100;
> > > +        int bit_idx = 0;
> > > +        int *bit;
> > > +
> > > +        bpf_for_each(bits, bit, &data, 64) {
> > > +                if (*bit == 0)
> > > +                        continue;
> > > +                bit_idx = *bit;
> > > +        }
> > > +        return bit_idx;
> > > +}
> > > --
> > > 2.30.1 (Apple Git-130)
> > >
> >
> >
> --
> Regards
> Yafang
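P.S. For reference, here is roughly the byte-array shape I was
suggesting for bits_copy32. It's an untested sketch; the name
bits_copy_bytes is made up, and the expected count of 10 assumes the
byte-granularity contract we still need to settle on:

SEC("syscall")
__description("bits copy bytes")
__success __retval(10) /* assumes byte-granularity bit numbering */
int bits_copy_bytes(void)
{
        /* Low 21 bits of the bits_copy32 mask, spelled out byte by byte:
         * bit N is bit N % 8 of data[N / 8], so the data definition is
         * the same on little- and big-endian and no s390 offset fixup
         * is needed.
         */
        unsigned char data[3] = {0x25, 0xc2, 0xf7};
        int nr = 0;
        int *bit;

        bpf_for_each(bits, bit, &data, 21)
                nr++;
        return nr;
}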