The test traverses all slab caches using the kmem_cache_iter and checks
whether the current task's pointer comes from the "task_struct" slab
cache.

Signed-off-by: Namhyung Kim <namhyung@xxxxxxxxxx>
---
 .../bpf/prog_tests/kmem_cache_iter.c          | 64 ++++++++++++++++++
 tools/testing/selftests/bpf/progs/bpf_iter.h  |  7 ++
 .../selftests/bpf/progs/kmem_cache_iter.c     | 66 +++++++++++++++++++
 3 files changed, 137 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
 create mode 100644 tools/testing/selftests/bpf/progs/kmem_cache_iter.c

diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
new file mode 100644
index 0000000000000000..3965e2924ac82d91
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+#include "kmem_cache_iter.skel.h"
+
+static void test_kmem_cache_iter_check_task(struct kmem_cache_iter *skel)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		.flags = 0,  /* run it with the current task */
+	);
+	int prog_fd = bpf_program__fd(skel->progs.check_task_struct);
+
+	/* get task_struct and check if it's from a slab cache */
+	bpf_prog_test_run_opts(prog_fd, &opts);
+
+	/* the BPF program should set the 'found' variable */
+	ASSERT_EQ(skel->bss->found, 1, "found task_struct");
+}
+
+void test_kmem_cache_iter(void)
+{
+	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+	struct kmem_cache_iter *skel = NULL;
+	union bpf_iter_link_info linfo = {};
+	struct bpf_link *link;
+	char buf[1024];
+	int iter_fd;
+
+	skel = kmem_cache_iter__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
+		return;
+
+	opts.link_info = &linfo;
+	opts.link_info_len = sizeof(linfo);
+
+	link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
+	if (!ASSERT_OK_PTR(link, "attach_iter"))
+		goto destroy;
+
+	iter_fd = bpf_iter_create(bpf_link__fd(link));
+	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
+		goto free_link;
+
+	memset(buf, 0, sizeof(buf));
+	while (read(iter_fd, buf, sizeof(buf)) > 0) {
+		/* read out all contents */
+		printf("%s", buf);
+	}
+
+	/* subsequent reads should return 0 */
+	ASSERT_EQ(read(iter_fd, buf, sizeof(buf)), 0, "read");
+
+	test_kmem_cache_iter_check_task(skel);
+
+	close(iter_fd);
+
+free_link:
+	bpf_link__destroy(link);
+destroy:
+	kmem_cache_iter__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h
index c41ee80533ca219a..3305dc3a74b32481 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter.h
+++ b/tools/testing/selftests/bpf/progs/bpf_iter.h
@@ -24,6 +24,7 @@
 #define BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used
 #define BTF_F_ZERO BTF_F_ZERO___not_used
 #define bpf_iter__ksym bpf_iter__ksym___not_used
+#define bpf_iter__kmem_cache bpf_iter__kmem_cache___not_used
 #include "vmlinux.h"
 #undef bpf_iter_meta
 #undef bpf_iter__bpf_map
@@ -48,6 +49,7 @@
 #undef BTF_F_PTR_RAW
 #undef BTF_F_ZERO
 #undef bpf_iter__ksym
+#undef bpf_iter__kmem_cache
 
 struct bpf_iter_meta {
 	struct seq_file *seq;
@@ -165,3 +167,8 @@ struct bpf_iter__ksym {
 	struct bpf_iter_meta *meta;
 	struct kallsym_iter *ksym;
 };
+
+struct bpf_iter__kmem_cache {
+	struct bpf_iter_meta *meta;
+	struct kmem_cache *s;
+} __attribute__((preserve_access_index));
diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
new file mode 100644
index 0000000000000000..3f6ec15a1bf6344c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Google */
+
+#include "bpf_iter.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define SLAB_NAME_MAX  256
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__uint(key_size, sizeof(void *));
+	__uint(value_size, SLAB_NAME_MAX);
+	__uint(max_entries, 1024);
+} slab_hash SEC(".maps");
+
+extern struct kmem_cache *bpf_get_kmem_cache(__u64 addr) __ksym;
+
+/* result, will be checked by userspace */
+int found;
+
+SEC("iter/kmem_cache")
+int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
+{
+	struct seq_file *seq = ctx->meta->seq;
+	struct kmem_cache *s = ctx->s;
+
+	if (s) {
+		char name[SLAB_NAME_MAX];
+
+		/*
+		 * Print the slab info to check that the slab iterator
+		 * implements the seq interface; also useful for debugging.
+		 */
+		BPF_SEQ_PRINTF(seq, "%s: %u\n", s->name, s->object_size);
+
+		bpf_probe_read_kernel_str(name, sizeof(name), s->name);
+		bpf_map_update_elem(&slab_hash, &s, name, BPF_NOEXIST);
+	}
+
+	return 0;
+}
+
+SEC("raw_tp/bpf_test_finish")
+int BPF_PROG(check_task_struct)
+{
+	__u64 curr = bpf_get_current_task();
+	struct kmem_cache *s;
+	char *name;
+
+	s = bpf_get_kmem_cache(curr);
+	if (s == NULL) {
+		found = -1;
+		return 0;
+	}
+
+	name = bpf_map_lookup_elem(&slab_hash, &s);
+	if (name && !bpf_strncmp(name, 11, "task_struct"))
+		found = 1;
+	else
+		found = -2;
+
+	return 0;
+}
--
2.46.1.824.gd892dcdcdd-goog
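
A note for reviewers (not part of the change): the iterator can also be
exercised outside of test_progs with a small standalone reader built
against the kmem_cache_iter.skel.h skeleton generated from the BPF
program in this patch. The sketch below is illustrative only; it follows
the same libbpf calls the selftest uses and simply dumps whatever the
iterator's seq handler prints.

/* Illustrative only -- assumes the kmem_cache_iter.skel.h skeleton
 * generated from the BPF program above.
 */
#include <stdio.h>
#include <unistd.h>
#include <bpf/libbpf.h>
#include "kmem_cache_iter.skel.h"

int main(void)
{
	struct kmem_cache_iter *skel;
	struct bpf_link *link;
	char buf[1024];
	int iter_fd;
	ssize_t n;

	skel = kmem_cache_iter__open_and_load();
	if (!skel)
		return 1;

	/* attach the iterator program and create a readable iterator fd */
	link = bpf_program__attach_iter(skel->progs.slab_info_collector, NULL);
	if (!link)
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0)
		goto out_link;

	/* each read() returns "<cache name>: <object_size>" lines */
	while ((n = read(iter_fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}

	close(iter_fd);
out_link:
	bpf_link__destroy(link);
out:
	kmem_cache_iter__destroy(skel);
	return 0;
}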