On Sat, 2025-02-22 at 00:33 +0800, Tao Chen wrote:

[...]

> +static const struct {
> +	const char *name;
> +	int code;
> +} program_types[] = {
> +#define _T(n) { #n, BPF_PROG_TYPE_##n }
> +	_T(KPROBE),
> +	_T(XDP),
> +	_T(SYSCALL),
> +	_T(SCHED_CLS),
> +	_T(SCHED_ACT),
> +	_T(SK_SKB),
> +	_T(SOCKET_FILTER),
> +	_T(CGROUP_SKB),
> +	_T(LWT_OUT),
> +	_T(LWT_IN),
> +	_T(LWT_XMIT),
> +	_T(LWT_SEG6LOCAL),
> +	_T(NETFILTER),
> +	_T(CGROUP_SOCK_ADDR),
> +	_T(SCHED_ACT)
> +#undef _T
> +};
> +
> +void test_libbpf_probe_kfuncs_many(void)
> +{

Hi Tao,

Sorry, there was probably some miscommunication on my side. I did not
mean this test for inclusion; it was intended as a one-time manual
inspection of libbpf_probe_bpf_kfunc() results, just a sanity check
before the series is merged. As an automated test it does not provide
much meaningful signal.

> +	int i, kfunc_id, ret, id;
> +	const struct btf_type *t;
> +	struct btf *btf = NULL;
> +	const char *kfunc;
> +	const char *tag;
> +
> +	btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
> +	if (!ASSERT_OK_PTR(btf, "btf_parse"))
> +		return;
> +	for (id = 0; id < btf__type_cnt(btf); ++id) {
> +		t = btf__type_by_id(btf, id);
> +		if (!t)
> +			continue;
> +		if (!btf_is_decl_tag(t))
> +			continue;
> +		tag = btf__name_by_offset(btf, t->name_off);
> +		if (strcmp(tag, "bpf_kfunc") != 0)
> +			continue;
> +		kfunc_id = t->type;
> +		t = btf__type_by_id(btf, kfunc_id);
> +		if (!btf_is_func(t))
> +			continue;
> +		kfunc = btf__name_by_offset(btf, t->name_off);
> +		for (i = 0; i < ARRAY_SIZE(program_types); ++i) {
> +			ret = libbpf_probe_bpf_kfunc(program_types[i].code,
> +						     kfunc_id, -1, NULL);
> +			if (ret < 0) {
> +				ASSERT_FAIL("kfunc:%s use prog type:%d",
> +					    kfunc, program_types[i].code);
> +				goto cleanup;
> +			}
> +		}
> +	}
> +cleanup:
> +	btf__free(btf);
> +}
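
For reference, the kind of one-off manual inspection I had in mind could
be as simple as the untested sketch below: it just walks the "bpf_kfunc"
decl tags in vmlinux BTF and prints the probe result for a single program
type, reusing the libbpf_probe_bpf_kfunc() call signature from this series
(prog type, kfunc BTF id, -1 for vmlinux, NULL opts):

/* Untested sketch: print libbpf_probe_bpf_kfunc() results for every
 * "bpf_kfunc"-tagged function in vmlinux BTF, for one program type.
 */
#include <stdio.h>
#include <string.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct btf *btf = btf__parse("/sys/kernel/btf/vmlinux", NULL);
	const struct btf_type *t;
	int id, ret;

	if (!btf)
		return 1;
	for (id = 1; id < btf__type_cnt(btf); id++) {
		t = btf__type_by_id(btf, id);
		if (!btf_is_decl_tag(t) ||
		    strcmp(btf__name_by_offset(btf, t->name_off), "bpf_kfunc"))
			continue;
		/* t->type is the BTF id of the tagged kfunc */
		ret = libbpf_probe_bpf_kfunc(BPF_PROG_TYPE_KPROBE, t->type, -1, NULL);
		printf("%s: %d\n",
		       btf__name_by_offset(btf, btf__type_by_id(btf, t->type)->name_off),
		       ret);
	}
	btf__free(btf);
	return 0;
}

Eyeballing that output once per relevant program type is enough; there is
no need to carry it as a selftest.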