This patch adds three tests to demonstrate these patterns and validate
their correctness.

test1: We use bpf_for_each(process, task) to iterate all processes in the
system and search for the current process with a given pid.

test2: We create a cgroup and add the current process to the cgroup. In the
BPF program, we use bpf_for_each(css_task, task, css) to iterate all tasks
under the cgroup. As expected, we find the current process.

test3: We create a cgroup tree. In the BPF program, we use
bpf_for_each(css, pos, root) to iterate all descendants under the root in
post order. As expected, we find all descendants, and the last iterated
cgroup is the root cgroup.

Signed-off-by: Chuyi Zhou <zhouchuyi@xxxxxxxxxxxxx>
---
 .../testing/selftests/bpf/prog_tests/iters.c | 123 ++++++++++++++++++
 .../testing/selftests/bpf/progs/iters_task.c |  83 ++++++++++++
 2 files changed, 206 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/progs/iters_task.c

diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c
index 10804ae5ae97..88fb565a3e97 100644
--- a/tools/testing/selftests/bpf/prog_tests/iters.c
+++ b/tools/testing/selftests/bpf/prog_tests/iters.c
@@ -2,12 +2,14 @@
 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
 
 #include <test_progs.h>
+#include "cgroup_helpers.h"
 
 #include "iters.skel.h"
 #include "iters_state_safety.skel.h"
 #include "iters_looping.skel.h"
 #include "iters_num.skel.h"
 #include "iters_testmod_seq.skel.h"
+#include "iters_task.skel.h"
 
 static void subtest_num_iters(void)
 {
@@ -90,6 +92,121 @@ static void subtest_testmod_seq_iters(void)
 	iters_testmod_seq__destroy(skel);
 }
 
+static void subtest_process_iters(void)
+{
+	struct iters_task *skel;
+	int err;
+
+	skel = iters_task__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		goto cleanup;
+	bpf_program__set_autoload(skel->progs.iter_task_for_each, true);
+	err = iters_task__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto cleanup;
+	skel->bss->target_pid = getpid();
+	err = iters_task__attach(skel);
+	if (!ASSERT_OK(err, "iters_task__attach"))
+		goto cleanup;
+
+	iters_task__detach(skel);
+	ASSERT_EQ(skel->bss->process_cnt, 1, "process_cnt");
+
+cleanup:
+	iters_task__destroy(skel);
+}
+
+static void subtest_css_task_iters(void)
+{
+	struct iters_task *skel = NULL;
+	int err, cg_fd, cg_id;
+	const char *cgrp_path = "/cg1";
+
+	err = setup_cgroup_environment();
+	if (!ASSERT_OK(err, "setup_cgroup_environment"))
+		goto cleanup;
+	cg_fd = create_and_get_cgroup(cgrp_path);
+	if (!ASSERT_GE(cg_fd, 0, "cg_create"))
+		goto cleanup;
+	cg_id = get_cgroup_id(cgrp_path);
+	err = join_cgroup(cgrp_path);
+	if (!ASSERT_OK(err, "join_cgroup"))
+		goto cleanup;
+
+	skel = iters_task__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		goto cleanup;
+
+	bpf_program__set_autoload(skel->progs.iter_css_task_for_each, true);
+	err = iters_task__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto cleanup;
+
+	skel->bss->target_pid = getpid();
+	skel->bss->cg_id = cg_id;
+	err = iters_task__attach(skel);
+	if (!ASSERT_OK(err, "iters_task__attach"))
+		goto cleanup;
+
+	iters_task__detach(skel);
+	ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt");
+
+cleanup:
+	cleanup_cgroup_environment();
+	iters_task__destroy(skel);
+}
+
+static void subtest_css_dec_iters(void)
+{
+	struct iters_task *skel = NULL;
+	struct {
+		const char *path;
+		int fd;
+	} cgs[] = {
+		{ "/cg1" },
+		{ "/cg1/cg2" },
+		{ "/cg1/cg2/cg3" },
+		{ "/cg1/cg2/cg3/cg4" },
+	};
+	int err, cg_nr = ARRAY_SIZE(cgs);
+	int i;
+
+	err = setup_cgroup_environment();
+	if (!ASSERT_OK(err, "setup_cgroup_environment"))
+		goto cleanup;
+	for (i = 0; i < cg_nr; i++) {
+		cgs[i].fd = create_and_get_cgroup(cgs[i].path);
+		if (!ASSERT_GE(cgs[i].fd, 0, "cg_create"))
+			goto cleanup;
+	}
+
+	skel = iters_task__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		goto cleanup;
+	bpf_program__set_autoload(skel->progs.iter_css_dec_for_each, true);
+	err = iters_task__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto cleanup;
+
+	skel->bss->target_pid = getpid();
+	skel->bss->cg_id = get_cgroup_id(cgs[0].path);
+
+	err = iters_task__attach(skel);
+	if (!ASSERT_OK(err, "iters_task__attach"))
+		goto cleanup;
+
+	iters_task__detach(skel);
+	ASSERT_EQ(skel->bss->css_dec_cnt, cg_nr, "post order search dec count");
+	ASSERT_EQ(skel->bss->last_cg_id, get_cgroup_id(cgs[0].path),
+		  "post order search last cgroup id");
+
+cleanup:
+	cleanup_cgroup_environment();
+	iters_task__destroy(skel);
+}
+
 void test_iters(void)
 {
 	RUN_TESTS(iters_state_safety);
@@ -103,4 +220,10 @@ void test_iters(void)
 		subtest_num_iters();
 	if (test__start_subtest("testmod_seq"))
 		subtest_testmod_seq_iters();
+	if (test__start_subtest("process"))
+		subtest_process_iters();
+	if (test__start_subtest("css_task"))
+		subtest_css_task_iters();
+	if (test__start_subtest("css_dec"))
+		subtest_css_dec_iters();
 }
diff --git a/tools/testing/selftests/bpf/progs/iters_task.c b/tools/testing/selftests/bpf/progs/iters_task.c
new file mode 100644
index 000000000000..524926b505b0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/iters_task.c
@@ -0,0 +1,83 @@
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+pid_t target_pid = 0;
+int process_cnt = 0;
+int css_task_cnt = 0;
+int css_dec_cnt = 0;
+
+u64 cg_id;
+u64 last_cg_id;
+
+struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
+struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp) __ksym;
+void bpf_cgroup_release(struct cgroup *p) __ksym;
+
+SEC("?tp_btf/sys_enter")
+int BPF_PROG(iter_task_for_each)
+{
+	struct task_struct *task;
+	struct task_struct *cur_task = bpf_get_current_task_btf();
+
+	if (cur_task->pid != target_pid)
+		return 0;
+
+	bpf_for_each(process, task)
+		if (task->pid == target_pid)
+			process_cnt += 1;
+
+	return 0;
+}
+
+SEC("?tp_btf/sys_enter")
+int iter_css_task_for_each(const void *ctx)
+{
+	struct task_struct *task;
+	struct task_struct *cur_task = bpf_get_current_task_btf();
+
+	if (cur_task->pid != target_pid)
+		return 0;
+
+	struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+
+	if (cgrp == NULL)
+		return 0;
+	struct cgroup_subsys_state *css = &cgrp->self;
+
+	bpf_for_each(css_task, task, css, 0)
+		if (task->pid == target_pid)
+			css_task_cnt += 1;
+
+	bpf_cgroup_release(cgrp);
+	return 0;
+}
+
+SEC("?tp_btf/sys_enter")
+int iter_css_dec_for_each(const void *ctx)
+{
+	struct task_struct *cur_task = bpf_get_current_task_btf();
+	bool is_post_order = true;
+
+	if (cur_task->pid != target_pid)
+		return 0;
+
+	struct cgroup *cgrp = bpf_cgroup_from_id(cg_id);
+
+	if (cgrp == NULL)
+		return 0;
+	struct cgroup_subsys_state *root = &cgrp->self;
+	struct cgroup_subsys_state *pos = NULL;
+
+	bpf_for_each(css, pos, root, is_post_order) {
+		struct cgroup *cur_cgrp = pos->cgroup;
+
+		css_dec_cnt += 1;
+		if (cur_cgrp)
+			last_cg_id = cur_cgrp->kn->id;
+	}
+	bpf_cgroup_release(cgrp);
+	return 0;
+}
--
2.20.1
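
A note for readers new to the open-coded iterator macros used in the BPF
programs above: bpf_for_each() is convenience sugar over explicit
new/next/destroy iterator kfunc calls. Roughly, and purely as an
illustration (the bpf_iter_css_task_{new,next,destroy} names below are
assumed from the css_task iterator introduced earlier in this series;
this is a sketch, not the literal macro expansion or part of this patch),
bpf_for_each(css_task, task, css, 0) corresponds to something like:

	/* Illustrative sketch only, assuming the css_task iterator kfuncs. */
	struct bpf_iter_css_task it;
	struct task_struct *task;

	bpf_iter_css_task_new(&it, css, 0);		/* start iterating tasks under css */
	while ((task = bpf_iter_css_task_next(&it))) {	/* next task, or NULL when done */
		if (task->pid == target_pid)
			css_task_cnt += 1;
	}
	bpf_iter_css_task_destroy(&it);			/* release the iterator state */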