On Fri, Aug 19, 2022 at 3:09 PM Kui-Feng Lee <kuifeng@xxxxxx> wrote:
>
> Test iterators of vma, files, and tasks of tasks.
>
> Ensure the API works appropriately to visit all tasks,
> tasks in a process, or a particular task.
>
> Signed-off-by: Kui-Feng Lee <kuifeng@xxxxxx>
> ---
>  .../selftests/bpf/prog_tests/bpf_iter.c      | 284 +++++++++++++++++-
>  .../selftests/bpf/prog_tests/btf_dump.c      |   2 +-
>  .../selftests/bpf/progs/bpf_iter_task.c      |   9 +
>  .../selftests/bpf/progs/bpf_iter_task_file.c |   9 +-
>  .../selftests/bpf/progs/bpf_iter_task_vma.c  |   6 +-
>  .../bpf/progs/bpf_iter_uprobe_offset.c       |  35 +++
>  6 files changed, 326 insertions(+), 19 deletions(-)
>  create mode 100644 tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
>

[...]

> -	do_dummy_read(skel->progs.dump_task);
> +	if (!ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"))
> +		goto done;
> +	locked = true;
> +
> +	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
> +		       "pthread_create"))
> +		goto done;
> +
> +

extra empty line

> +	skel->bss->tid = getpid();
> +
> +	do_dummy_read_opts(skel->progs.dump_task, opts);
> +
> +	*num_unknown = skel->bss->num_unknown_tid;
> +	*num_known = skel->bss->num_known_tid;
> +
> +	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
> +	locked = false;
> +	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
> +		     "pthread_join");
>
> +done:
> +	if (locked)

it's a bit of overkill to expect and handle that pthread_mutex_lock()
might fail, I'd remove those asserts and the locked flag, just assume
that the lock works (if it doesn't, it's either a test bug that would
be caught early, or something is very broken in the system anyway)

> +		ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
> 	bpf_iter_task__destroy(skel);
> }
>

[...]

>  static void test_task_sleepable(void)
>  {
>  	struct bpf_iter_task *skel;
> @@ -212,15 +349,13 @@ static void test_task_stack(void)
>  	bpf_iter_task_stack__destroy(skel);
>  }
>
> -static void *do_nothing(void *arg)
> -{
> -	pthread_exit(arg);
> -}
> -
>  static void test_task_file(void)
>  {
> +	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);

DECLARE_LIBBPF_OPTS is discouraged, best to use the shorter LIBBPF_OPTS

>  	struct bpf_iter_task_file *skel;
> +	union bpf_iter_link_info linfo;
>  	pthread_t thread_id;
> +	bool locked = false;
>  	void *ret;
>
>  	skel = bpf_iter_task_file__open_and_load();
> @@ -229,19 +364,43 @@ static void test_task_file(void)
>
>  	skel->bss->tgid = getpid();
>
> -	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
> +	if (!ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"))
> +		goto done;
> +	locked = true;

same about the failing mutex_lock: it shouldn't fail, and it's fair
to expect that it won't

> +
> +	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
> 		       "pthread_create"))
> 		goto done;
>
> -	do_dummy_read(skel->progs.dump_task_file);
> +	memset(&linfo, 0, sizeof(linfo));
> +	linfo.task.tid = getpid();
> +	opts.link_info = &linfo;
> +	opts.link_info_len = sizeof(linfo);

[...]
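To make the mutex comments concrete: the same hunk without the lock
asserts and the locked flag would look roughly like below (untested
sketch, reusing the surrounding variables, do_nothing_wait() and
do_dummy_read_opts() from this patch):

	/* assume the lock just works */
	pthread_mutex_lock(&do_nothing_mutex);

	if (!ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		       "pthread_create")) {
		/* still drop the lock exactly once on the error path */
		pthread_mutex_unlock(&do_nothing_mutex);
		goto done;
	}

	skel->bss->tid = getpid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	/* unlock before joining, do_nothing_wait() blocks on the mutex */
	pthread_mutex_unlock(&do_nothing_mutex);
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

done:
	bpf_iter_task__destroy(skel);

The same shape applies to test_task_file() and to the
pthread_mutex_init() call in test_bpf_iter() further down.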
> +	link = bpf_program__attach_iter(skel->progs.get_uprobe_offset, opts);
> +	if (!ASSERT_OK_PTR(link, "attach_iter"))
> +		return;
> +
> +	iter_fd = bpf_iter_create(bpf_link__fd(link));
> +	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
> +		goto exit;
> +
> +	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
> +		;
> +	CHECK(len < 0, "read", "read failed: %s\n", strerror(errno));

no CHECK()s, please (use ASSERT_xxx() instead)

> +	buf[15] = 0;
> +	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");
> +
> +	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
> +	if (one_proc)
> +		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
> +	else
> +		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
> +
> +	close(iter_fd);
> +
> +exit:
> +	bpf_link__destroy(link);
> +}
> +
> +static void test_task_uprobe_offset(void)
> +{
> +	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
> +	union bpf_iter_link_info linfo;
> +
> +	memset(&linfo, 0, sizeof(linfo));
> +	linfo.task.pid = getpid();
> +	opts.link_info = &linfo;
> +	opts.link_info_len = sizeof(linfo);
> +
> +	test_task_uprobe_offset_common(&opts, true);
> +
> +	linfo.task.pid = 0;
> +	linfo.task.tid = getpid();
> +	test_task_uprobe_offset_common(&opts, true);
> +
> +	test_task_uprobe_offset_common(NULL, false);
> +}
> +
>  void test_bpf_iter(void)
>  {
> +	if (!ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init"))
> +		return;
> +

ditto, too paranoid, IMO

>  	if (test__start_subtest("btf_id_or_null"))
>  		test_btf_id_or_null();
>  	if (test__start_subtest("ipv6_route"))

[...]

> diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> new file mode 100644
> index 000000000000..825ca86678bd
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/bpf_iter_uprobe_offset.c
> @@ -0,0 +1,35 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
> +#include "bpf_iter.h"
> +#include <bpf/bpf_helpers.h>
> +
> +char _license[] SEC("license") = "GPL";
> +
> +__u32 unique_tgid_cnt = 0;
> +uintptr_t address = 0;
> +uintptr_t offset = 0;
> +__u32 last_tgid = 0;
> +__u32 pid = 0;
> +
> +SEC("iter/task_vma") int get_uprobe_offset(struct bpf_iter__task_vma *ctx)

please keep SEC() on a separate line

> +{
> +	struct vm_area_struct *vma = ctx->vma;
> +	struct seq_file *seq = ctx->meta->seq;
> +	struct task_struct *task = ctx->task;
> +
> +	if (task == NULL || vma == NULL)
> +		return 0;
> +
> +	if (last_tgid != task->tgid)
> +		unique_tgid_cnt++;
> +	last_tgid = task->tgid;
> +
> +	if (task->tgid != pid)
> +		return 0;
> +
> +	if (vma->vm_start <= address && vma->vm_end > address) {
> +		offset = address - vma->vm_start + (vma->vm_pgoff << 12);

it's best not to assume page_size is 4K, you can pass the actual value
through a global variable from user-space (we've previously fixed a
bunch of tests with a fixed page_size assumption, as they break some
platforms; let's not regress that)

> +		BPF_SEQ_PRINTF(seq, "OK\n");
> +	}
> +	return 0;
> +}
> --
> 2.30.2
>
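And to make the page_size comment concrete, one possible shape
(untested sketch; the page_shift global and its setup are my
invention, not something from this patch). On the BPF side:

	/* set by user-space before attaching the iterator */
	__u32 page_shift = 0;

	/* ... then inside get_uprobe_offset(): */
	offset = address - vma->vm_start + (vma->vm_pgoff << page_shift);

And on the user-space side, before creating the iterator:

	/* derive the shift from the real page size (sysconf(3), <unistd.h>) */
	skel->bss->page_shift = __builtin_ctzl(sysconf(_SC_PAGESIZE));

That keeps the test working on platforms with 16K or 64K pages.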