On 12/14/23 23:17, Martin KaFai Lau wrote:
On 12/8/23 4:27 PM, thinker.li@xxxxxxxxx wrote:
diff --git
a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
new file mode 100644
index 000000000000..55a4c6ed92aa
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <time.h>
+
+#include "struct_ops_module.skel.h"
+#include "testmod_unload.skel.h"
+
+/* Load the struct_ops_module skeleton, attach its struct_ops map (whose
+ * type is defined by the bpf_testmod module's BTF), verify the module-side
+ * callback ran, then confirm that destroying the map releases the refcount
+ * it holds on the module's BTF.
+ */
+static void test_regular_load(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct struct_ops_module *skel;
+ struct testmod_unload *skel_unload;
+ struct bpf_link *link_map_free = NULL;
+ struct bpf_link *link;
+ int err, i;
+
+ skel = struct_ops_module__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
+ return;
+
+ err = struct_ops_module__load(skel);
+ if (!ASSERT_OK(err, "struct_ops_module_load"))
+ goto cleanup;
+
+ /* Registering the struct_ops map invokes the module's reg() hook. */
+ link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+ ASSERT_OK_PTR(link, "attach_test_mod_1");
+
+ /* test_2() will be called from bpf_dummy_reg() in bpf_testmod.c */
+ ASSERT_EQ(skel->bss->test_2_result, 7, "test_2_result");
+
+ /* Safe even if the attach above failed: destroying NULL is a no-op. */
+ bpf_link__destroy(link);
+
+cleanup:
+ /* Attach a tracing prog to observe the struct_ops map being freed.
+ * NOTE(review): presumably trace_map_free fires on the map's free path
+ * and sets bpf_testmod_put — confirm against testmod_unload.c.
+ */
+ skel_unload = testmod_unload__open_and_load();
+
+ if (ASSERT_OK_PTR(skel_unload, "testmod_unload_open"))
+ link_map_free =
 bpf_program__attach(skel_unload->progs.trace_map_free);
+ /* Destroying the skeleton releases the struct_ops map; its free may
+ * complete asynchronously, hence the polling loop below.
+ */
+ struct_ops_module__destroy(skel);
+
+ if (!ASSERT_OK_PTR(link_map_free, "create_link_map_free"))
+ return;
+
+ /* Wait for the struct_ops map to be freed. Struct_ops maps hold a
+ * refcount on the module BTF. And, this function unloads and then
+ * loads bpf_testmod. Without waiting for the map to be freed, the
+ * next test may fail to unload the bpf_testmod module since the map
+ * is still holding a refcnt to the module.
+ */
+ for (i = 0; i < 10; i++) {
+ if (skel_unload->bss->bpf_testmod_put)
+ break;
+ usleep(100000);
+ }
+ ASSERT_EQ(skel_unload->bss->bpf_testmod_put, 1, "map_free");
+
+ bpf_link__destroy(link_map_free);
+ testmod_unload__destroy(skel_unload);
+}
+
+/* Unload bpf_testmod and verify that loading a struct_ops map whose type
+ * lives in the module's BTF fails once the module (and its BTF) is gone.
+ */
+static void test_load_without_module(void)
+{
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+ struct struct_ops_module *skel;
+ int err;
+
+ /* Remove bpf_testmod so its BTF is no longer in the kernel. */
+ err = unload_bpf_testmod(false);
+ if (!ASSERT_OK(err, "unload_bpf_testmod"))
+ return;
+
+ skel = struct_ops_module__open_opts(&opts);
+ if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
+ goto cleanup;
+ err = struct_ops_module__load(skel);
Both the module btf and the .ko itself are gone from the kernel now?
This is basically testing libbpf cannot find 'struct bpf_testmod_ops'
from the running kernel?
Yes, you are right! So, I just rewrote this by calling bpf_map_create()
instead of calling the skeleton. To simplify the test, I actually use
bpf_map_info of an existing map created from the skeleton as inputs to
bpf_map_create(). And, the btf_obj_id (or btf_vmlinux_id) is used and
tested here.
How about create another struct_ops_module_notfound.c bpf program:
SEC(".struct_ops.link")
struct bpf_testmod_ops_notfound testmod_1 = {
.test_1 = (void *)test_1,
.test_2 = (void *)test_2,
};
and avoid the usleep() wait and the unload_bpf_testmod()?
In order to skip finding module btf for using bpf_map_create(),
I use the skeleton to create a map first to get its bpf_map_info.
So, it still needs to load and unload the same module.
+ /* Load must fail: the module BTF backing the struct_ops type is gone. */
+ ASSERT_ERR(err, "struct_ops_module_load");
+
+ struct_ops_module__destroy(skel);
+
+cleanup:
+ /* Re-load bpf_testmod; without this, the next test may fail because it
+ * depends on the module being present.
+ */
+ load_bpf_testmod(false);
+}
+
+/* Test entry point. NOTE(review): the serial_ prefix presumably makes
+ * test_progs run this test without parallel tests — consistent with the
+ * subtests mutating global state by loading/unloading bpf_testmod.
+ */
+void serial_test_struct_ops_module(void)
+{
+ if (test__start_subtest("regular_load"))
+ test_regular_load();
+
+ if (test__start_subtest("load_without_module"))
+ test_load_without_module();
+}
+