Re: [PATCH bpf-next v2 5/6] selftests/bpf: detach a struct_ops link from the subsystem managing it.

On 5/8/24 16:50, Martin KaFai Lau wrote:
On 5/6/24 10:55 PM, Kui-Feng Lee wrote:
Not only can a user space program detach a struct_ops link; the subsystem
managing a link can also detach it. This patch adds a kfunc to simulate
detaching a link by the subsystem managing it.

Signed-off-by: Kui-Feng Lee <thinker.li@xxxxxxxxx>
---
  .../selftests/bpf/bpf_testmod/bpf_testmod.c   | 21 ++++++
  .../bpf/prog_tests/test_struct_ops_module.c   | 65 +++++++++++++++++++
  .../selftests/bpf/progs/struct_ops_detach.c   |  6 ++
  3 files changed, 92 insertions(+)

diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
index c89a6414c69f..0bf1acc1767a 100644
--- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -502,6 +502,26 @@ __bpf_kfunc void bpf_kfunc_call_test_sleepable(void)
  static DEFINE_MUTEX(detach_mutex);
  static struct bpf_link *link_to_detach;
+__bpf_kfunc int bpf_dummy_do_link_detach(void)
+{
+    struct bpf_link *link;
+    int ret = -ENOENT;
+
+    mutex_lock(&detach_mutex);
+    link = link_to_detach;
+    /* Make sure the link is still valid by increasing its refcnt */
+    if (link && !atomic64_inc_not_zero(&link->refcnt))

It is better to reuse the bpf_link_inc_not_zero().

I will export this function to be used by modules. A rough sketch of what that could look like follows the quoted function below.


+        link = NULL;
+    mutex_unlock(&detach_mutex);
+
+    if (link) {
+        ret = link->ops->detach(link);
+        bpf_link_put(link);
+    }
+
+    return ret;
+}
+
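For reference, something roughly like this for the export (only a sketch; the exact placement of the declaration and whether EXPORT_SYMBOL_GPL is needed for bpf_testmod are assumptions at this point):

/* include/linux/bpf.h (sketch): make the helper visible outside syscall.c */
struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);

/* kernel/bpf/syscall.c (sketch): drop the static so a module like
 * bpf_testmod can take a reference on a link that may be going away.
 */
struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
    return atomic64_inc_not_zero(&link->refcnt) ? link : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(bpf_link_inc_not_zero);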
  BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
  BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
  BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
@@ -529,6 +549,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
  BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
  BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
  BTF_ID_FLAGS(func, bpf_kfunc_call_test_sleepable, KF_SLEEPABLE)
+BTF_ID_FLAGS(func, bpf_dummy_do_link_detach)

It should need KF_SLEEPABLE since a mutex is used.

To simplify it, a spinlock will be used instead.
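Something roughly like the following (just a sketch; it assumes the exported bpf_link_inc_not_zero() mentioned above, and whether link->ops->detach() itself is safe to call from a non-sleepable context still needs to be checked):

static DEFINE_SPINLOCK(detach_lock);
/* Same pointer as before, now guarded by detach_lock instead of detach_mutex */
static struct bpf_link *link_to_detach;

__bpf_kfunc int bpf_dummy_do_link_detach(void)
{
    struct bpf_link *link = NULL;
    int ret = -ENOENT;

    /* The spinlock only protects the link_to_detach pointer; take a
     * reference under the lock and do the actual detach outside of it.
     */
    spin_lock(&detach_lock);
    if (link_to_detach && !IS_ERR(bpf_link_inc_not_zero(link_to_detach)))
        link = link_to_detach;
    spin_unlock(&detach_lock);

    if (link) {
        ret = link->ops->detach(link);
        bpf_link_put(link);
    }

    return ret;
}

With the spinlock, the locking itself no longer requires a sleepable context, so the BTF_ID_FLAGS() entry would not need KF_SLEEPABLE for that reason.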


  BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
  static int bpf_testmod_ops_init(struct btf *btf)
diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
index f39455b81664..9f6657b53a93 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c
@@ -229,6 +229,69 @@ static void test_detach_link(void)
      struct_ops_detach__destroy(skel);
  }
+/* Detach a link from the subsystem that the link was registered to */
+static void test_subsystem_detach(void)
+{
+    LIBBPF_OPTS(bpf_test_run_opts, topts,
+            .data_in = &pkt_v4,
+            .data_size_in = sizeof(pkt_v4));
+    struct epoll_event ev, events[2];
+    struct struct_ops_detach *skel;
+    struct bpf_link *link = NULL;
+    int fd, epollfd = -1, nfds;
+    int prog_fd;
+    int err;
+
+    skel = struct_ops_detach__open_and_load();
+    if (!ASSERT_OK_PTR(skel, "struct_ops_detach_open_and_load"))
+        return;
+
+    link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach);
+    if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+        goto cleanup;
+
+    fd = bpf_link__fd(link);
+    if (!ASSERT_GE(fd, 0, "link_fd"))
+        goto cleanup;
+
+    prog_fd = bpf_program__fd(skel->progs.start_detach);
+    if (!ASSERT_GE(prog_fd, 0, "start_detach_fd"))
+        goto cleanup;
+
+    /* Do detachment from the registered subsystem */
+    err = bpf_prog_test_run_opts(prog_fd, &topts);
+    if (!ASSERT_OK(err, "start_detach_run"))
+        goto cleanup;
+
+    if (!ASSERT_EQ(topts.retval, 0, "start_detach_run retval"))
+        goto cleanup;
+
+    epollfd = epoll_create1(0);
+    if (!ASSERT_GE(epollfd, 0, "epoll_create1"))
+        goto cleanup;
+
+    ev.events = EPOLLHUP;
+    ev.data.fd = fd;
+    err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev);
+    if (!ASSERT_OK(err, "epoll_ctl"))
+        goto cleanup;
+
+    /* Wait for EPOLLHUP */
+    nfds = epoll_wait(epollfd, events, 2, 5000);
+    if (!ASSERT_EQ(nfds, 1, "epoll_wait"))
+        goto cleanup;
+
+    if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd"))
+        goto cleanup;
+    if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events"))
+        goto cleanup;
+
+cleanup:
+    close(epollfd);
+    bpf_link__destroy(link);
+    struct_ops_detach__destroy(skel);
+}
+
  void serial_test_struct_ops_module(void)
  {
      if (test__start_subtest("test_struct_ops_load"))
@@ -239,5 +302,7 @@ void serial_test_struct_ops_module(void)
          test_struct_ops_incompatible();
      if (test__start_subtest("test_detach_link"))
          test_detach_link();
+    if (test__start_subtest("test_subsystem_detach"))
+        test_subsystem_detach();
  }
diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
index aeb355b3bea3..139f9a5c5601 100644
--- a/tools/testing/selftests/bpf/progs/struct_ops_detach.c
+++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c
@@ -29,3 +29,9 @@ struct bpf_testmod_ops testmod_do_detach = {
      .test_1 = (void *)test_1,
      .test_2 = (void *)test_2,
  };
+
+SEC("tc")

The bpf_dummy_do_link_detach() uses a mutex. There is no lockdep splat in the test?

+int start_detach(void *skb)
+{
+    return bpf_dummy_do_link_detach();
+}




