On 10/17/24 1:06 AM, Namhyung Kim wrote:
The new subtest is attached to a sleepable fentry of the syncfs()
syscall. It iterates over the kmem_caches using a bpf_for_each loop
and counts the number of entries. Finally it compares the count with
the number of entries seen by the regular iterator.
$ ./vmtest.sh -- ./test_progs -t kmem_cache_iter
...
#130/1 kmem_cache_iter/check_task_struct:OK
#130/2 kmem_cache_iter/check_slabinfo:OK
#130/3 kmem_cache_iter/open_coded_iter:OK
#130 kmem_cache_iter:OK
Summary: 1/3 PASSED, 0 SKIPPED, 0 FAILED
Also simplify the code by using the attach routine of the skeleton.
Signed-off-by: Namhyung Kim <namhyung@xxxxxxxxxx>
---
.../testing/selftests/bpf/bpf_experimental.h | 6 ++++
.../bpf/prog_tests/kmem_cache_iter.c | 28 +++++++++++--------
.../selftests/bpf/progs/kmem_cache_iter.c | 24 ++++++++++++++++
3 files changed, 46 insertions(+), 12 deletions(-)
diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h
index b0668f29f7b394eb..cd8ecd39c3f3c68d 100644
--- a/tools/testing/selftests/bpf/bpf_experimental.h
+++ b/tools/testing/selftests/bpf/bpf_experimental.h
@@ -582,4 +582,10 @@ extern int bpf_wq_set_callback_impl(struct bpf_wq *wq,
unsigned int flags__k, void *aux__ign) __ksym;
#define bpf_wq_set_callback(timer, cb, flags) \
bpf_wq_set_callback_impl(timer, cb, flags, NULL)
+
+struct bpf_iter_kmem_cache;
+extern int bpf_iter_kmem_cache_new(struct bpf_iter_kmem_cache *it) __weak __ksym;
+extern struct kmem_cache *bpf_iter_kmem_cache_next(struct bpf_iter_kmem_cache *it) __weak __ksym;
+extern void bpf_iter_kmem_cache_destroy(struct bpf_iter_kmem_cache *it) __weak __ksym;
+
#endif
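
(Side note: these declarations are what allow a prog to write
bpf_for_each(kmem_cache, s). Conceptually the macro expands to the
open-coded loop below; this is only a sketch, since the real macro in
this header declares the iterator with aligned(8) and a cleanup()
attribute, and the struct definition itself comes from vmlinux.h.)

	struct bpf_iter_kmem_cache it;
	struct kmem_cache *s;

	bpf_iter_kmem_cache_new(&it);
	while ((s = bpf_iter_kmem_cache_next(&it)) != NULL) {
		/* inspect s */
	}
	bpf_iter_kmem_cache_destroy(&it);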
diff --git a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
index 848d8fc9171fae45..a1fd3bc57c0b21bb 100644
--- a/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/kmem_cache_iter.c
@@ -68,12 +68,18 @@ static void subtest_kmem_cache_iter_check_slabinfo(struct kmem_cache_iter *skel)
fclose(fp);
}
+static void subtest_kmem_cache_iter_open_coded(struct kmem_cache_iter *skel)
+{
+ /* To trigger the open coded iterator attached to the syscall */
+ syncfs(0);
+
+ /* It should be the same as what we've seen from the explicit iterator */
+ ASSERT_EQ(skel->bss->open_coded_seen, skel->bss->kmem_cache_seen, "open_code_seen_eq");
+}
+
void test_kmem_cache_iter(void)
{
- DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct kmem_cache_iter *skel = NULL;
- union bpf_iter_link_info linfo = {};
- struct bpf_link *link;
char buf[256];
int iter_fd;
@@ -81,16 +87,12 @@ void test_kmem_cache_iter(void)
if (!ASSERT_OK_PTR(skel, "kmem_cache_iter__open_and_load"))
return;
- opts.link_info = &linfo;
- opts.link_info_len = sizeof(linfo);
-
- link = bpf_program__attach_iter(skel->progs.slab_info_collector, &opts);
- if (!ASSERT_OK_PTR(link, "attach_iter"))
+ if (!ASSERT_OK(kmem_cache_iter__attach(skel), "skel_attach"))
Nice, the manual opts/linfo setup and the extra bpf_link are no longer
needed with this change.
goto destroy;
- iter_fd = bpf_iter_create(bpf_link__fd(link));
+ iter_fd = bpf_iter_create(bpf_link__fd(skel->links.slab_info_collector));
if (!ASSERT_GE(iter_fd, 0, "iter_create"))
- goto free_link;
+ goto detach;
memset(buf, 0, sizeof(buf));
while (read(iter_fd, buf, sizeof(buf)) > 0) {
@@ -105,11 +107,13 @@ void test_kmem_cache_iter(void)
subtest_kmem_cache_iter_check_task_struct(skel);
if (test__start_subtest("check_slabinfo"))
subtest_kmem_cache_iter_check_slabinfo(skel);
+ if (test__start_subtest("open_coded_iter"))
+ subtest_kmem_cache_iter_open_coded(skel);
close(iter_fd);
-free_link:
- bpf_link__destroy(link);
+detach:
+ kmem_cache_iter__detach(skel);
nit. I think the kmem_cache_iter__destroy() below will also detach, so there
is no need for an explicit kmem_cache_iter__detach().
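With that, the "goto detach" above would become "goto destroy" and the
tail of the function would collapse to something like this sketch:

	close(iter_fd);
destroy:
	/* the generated __destroy() also detaches any attached links */
	kmem_cache_iter__destroy(skel);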
destroy:
kmem_cache_iter__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
index 72c9dafecd98406b..4c44aa279a5328fe 100644
--- a/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
+++ b/tools/testing/selftests/bpf/progs/kmem_cache_iter.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2024 Google */
#include "bpf_iter.h"
+#include "bpf_experimental.h"
+#include "bpf_misc.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
@@ -33,6 +35,7 @@ extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym;
/* Result, will be checked by userspace */
int task_struct_found;
int kmem_cache_seen;
+int open_coded_seen;
SEC("iter/kmem_cache")
int slab_info_collector(struct bpf_iter__kmem_cache *ctx)
@@ -85,3 +88,24 @@ int BPF_PROG(check_task_struct)
task_struct_found = -2;
return 0;
}
+
+SEC("fentry.s/" SYS_PREFIX "sys_syncfs")
+int open_coded_iter(const void *ctx)
+{
+ struct kmem_cache *s;
+
+ bpf_for_each(kmem_cache, s) {
+ struct kmem_cache_result *r;
+ int idx = open_coded_seen;
+
+ r = bpf_map_lookup_elem(&slab_result, &idx);
+ if (r == NULL)
+ break;
+
+ open_coded_seen++;
I am not sure this will work well if the testing system somehow has another
process calling syncfs(). It is probably a good idea to guard this by checking
the tid of test_progs at the beginning of this bpf prog.
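Something like the sketch below, assuming a global (the name
"target_tid" is illustrative) that userspace fills in before the
syncfs() call:

	int target_tid;	/* hypothetical: set to the test_progs tid */

	SEC("fentry.s/" SYS_PREFIX "sys_syncfs")
	int open_coded_iter(const void *ctx)
	{
		/* lower 32 bits of pid_tgid are the calling thread's tid */
		if ((int)bpf_get_current_pid_tgid() != target_tid)
			return 0;
		/* ... iterate as before ... */
		return 0;
	}

with the userspace side doing something like
skel->bss->target_tid = syscall(SYS_gettid); before triggering the
iterator.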
+
+ if (r->obj_size != s->size)
+ break;
+ }
+ return 0;
+}