On 10/31/23 5:30 PM, Kui-Feng Lee wrote:
On 10/30/23 23:59, Martin KaFai Lau wrote:
On 10/30/23 12:28 PM, thinker.li@xxxxxxxxx wrote:
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
new file mode 100644
index 000000000000..3a00dc294583
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <time.h>
+
+#include "rcu_tasks_trace_gp.skel.h"
+#include "struct_ops_module.skel.h"
+
+static void test_regular_load(void)
+{
+	struct struct_ops_module *skel;
+	struct bpf_link *link;
+	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+	int err;
+
+	skel = struct_ops_module__open_opts(&opts);
+	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
+		return;
+	err = struct_ops_module__load(skel);
+	if (!ASSERT_OK(err, "struct_ops_module_load"))
+		goto cleanup;
+
+	link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
+	ASSERT_OK_PTR(link, "attach_test_mod_1");
+
+	/* test_2() will be called from bpf_dummy_reg() in bpf_testmod.c */
+	ASSERT_EQ(skel->bss->test_2_result, 7, "test_2_result");
+
+	bpf_link__destroy(link);
+
+cleanup:
+	struct_ops_module__destroy(skel);
+}
+
+void serial_test_struct_ops_module(void)
+{
+	if (test__start_subtest("regular_load"))
+		test_regular_load();
Could it also add some negative tests, e.g. a missing 'struct
bpf_struct_ops_common_value', reg() when the module is gone, etc.?
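For instance, a minimal sketch of one such negative subtest (hypothetical:
it assumes bpf_testmod has already been unloaded when the subtest runs, and
it reuses the skeleton names from this patch):

static void test_load_without_module(void)
{
	struct struct_ops_module *skel;

	/* assume bpf_testmod was unloaded before this point */
	skel = struct_ops_module__open();
	if (!ASSERT_OK_PTR(skel, "struct_ops_module_open"))
		return;
	/* the module's struct_ops value type is gone, so load must fail */
	ASSERT_ERR(struct_ops_module__load(skel), "load_should_fail");
	struct_ops_module__destroy(skel);
}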
[ ... ]
+/* This function will trigger call_rcu_tasks_trace() in the kernel */
+static int kern_sync_rcu_tasks_trace(void)
With patch 4, is it still needed?
Patch 4 shortens the time the module is held, but the race can still happen
since bpf_link_put() is performed asynchronously.
Is the link pinned to a file that triggers bpf_link_put()?
Otherwise, close() should reach bpf_link_put_direct(), which is synchronous.
Even if it went through bpf_link_put(), rcu_tasks_trace_gp is very specific to
bpf sleepable tracing progs. Is it the correct one to wait for?
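A minimal sketch of that synchronous path, assuming the link is never pinned
to bpffs (testmod_1 is the map from this patch):

	link = bpf_map__attach_struct_ops(skel->maps.testmod_1);
	/* ... exercise the struct_ops ... */
	/* close()s the link fd; with no bpffs pin this is the last
	 * reference, so the kernel takes bpf_link_put_direct() and the
	 * module ref is dropped before the call returns -- no
	 * RCU-tasks-trace grace-period wait should be needed.
	 */
	bpf_link__destroy(link);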
+{
+	struct rcu_tasks_trace_gp *rcu;
+	time_t start;
+	long gp_seq;
+	LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+	rcu = rcu_tasks_trace_gp__open_and_load();
+	if (!rcu)
+		return -EFAULT;
+	if (rcu_tasks_trace_gp__attach(rcu))
+		goto err_out;
+
+	gp_seq = READ_ONCE(rcu->bss->gp_seq);
+
+	if (bpf_prog_test_run_opts(bpf_program__fd(rcu->progs.do_call_rcu_tasks_trace),
+				   &opts))
+		goto err_out;
+	if (opts.retval != 0)
+		goto err_out;
+
+	/* wait up to 2 seconds for gp_seq to advance, i.e. for an
+	 * RCU-tasks-trace grace period to complete
+	 */
+	start = time(NULL);
+	while ((start + 2) > time(NULL) &&
+	       gp_seq == READ_ONCE(rcu->bss->gp_seq))
+		sched_yield();
+
+	rcu_tasks_trace_gp__destroy(rcu);
+
+	return 0;
+
+err_out:
+	rcu_tasks_trace_gp__destroy(rcu);
+	return -EFAULT;
+}
+
/*
* Trigger synchronize_rcu() in kernel.
*/
int kern_sync_rcu(void)
{
+	if (kern_sync_rcu_tasks_trace())
+		return -EFAULT;
	return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0);
}
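A hypothetical caller-side sketch (the 'link' name is illustrative): a test
that wants the struct_ops map's module reference fully dropped, e.g. before
trying to unload bpf_testmod, would do:

	bpf_link__destroy(link);
	/* wait for both a regular RCU and an RCU-tasks-trace grace period */
	ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");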