On Sat, 1 Oct 2022 at 16:47, David Vernet <void@xxxxxxxxxxxxx> wrote:
>
> A previous change added a series of kfuncs for storing struct
> task_struct objects as referenced kptrs. This patch adds a new
> task_kfunc test suite for validating their expected behavior.
>
> Signed-off-by: David Vernet <void@xxxxxxxxxxxxx>
> ---
>  tools/testing/selftests/bpf/DENYLIST.s390x    |   1 +
>  .../selftests/bpf/prog_tests/task_kfunc.c     | 155 ++++++++++++
>  .../selftests/bpf/progs/task_kfunc_common.h   |  83 +++++++
>  .../selftests/bpf/progs/task_kfunc_failure.c  | 225 ++++++++++++++++++
>  .../selftests/bpf/progs/task_kfunc_success.c  | 113 +++++++++
>  5 files changed, 577 insertions(+)
>  create mode 100644 tools/testing/selftests/bpf/prog_tests/task_kfunc.c
>  create mode 100644 tools/testing/selftests/bpf/progs/task_kfunc_common.h
>  create mode 100644 tools/testing/selftests/bpf/progs/task_kfunc_failure.c
>  create mode 100644 tools/testing/selftests/bpf/progs/task_kfunc_success.c
>
> diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
> index 17e074eb42b8..4c34818ec1ee 100644
> --- a/tools/testing/selftests/bpf/DENYLIST.s390x
> +++ b/tools/testing/selftests/bpf/DENYLIST.s390x
> @@ -75,3 +75,4 @@ user_ringbuf # failed to find kernel BTF type ID of
>  lookup_key # JIT does not support calling kernel function (kfunc)
>  verify_pkcs7_sig # JIT does not support calling kernel function (kfunc)
>  kfunc_dynptr_param # JIT does not support calling kernel function (kfunc)
> +task_kfunc # JIT does not support calling kernel function
> diff --git a/tools/testing/selftests/bpf/prog_tests/task_kfunc.c b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
> new file mode 100644
> index 000000000000..6c577fbca8f7
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/prog_tests/task_kfunc.c
> @@ -0,0 +1,155 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
> +
> +#define _GNU_SOURCE
> +#include <sys/wait.h>
> +#include <test_progs.h>
> +#include <unistd.h>
> +
> +#include "task_kfunc_failure.skel.h"
> +#include "task_kfunc_success.skel.h"
> +
> +static size_t log_buf_sz = 1 << 20; /* 1 MB */
> +static char obj_log_buf[1048576];
> +
> +static struct task_kfunc_success *open_load_task_kfunc_skel(void)
> +{
> +	struct task_kfunc_success *skel;
> +	int err;
> +
> +	skel = task_kfunc_success__open();
> +	if (!ASSERT_OK_PTR(skel, "skel_open"))
> +		return NULL;
> +
> +	skel->bss->pid = getpid();
> +
> +	err = task_kfunc_success__load(skel);
> +	if (!ASSERT_OK(err, "skel_load"))
> +		goto cleanup;
> +
> +	return skel;
> +
> +cleanup:
> +	task_kfunc_success__destroy(skel);
> +	return NULL;
> +}
> +
> +static void run_success_test(const char *prog_name)
> +{
> +	struct task_kfunc_success *skel;
> +	int status;
> +	pid_t child_pid;
> +	struct bpf_program *prog;
> +	struct bpf_link *link = NULL;
> +
> +	skel = open_load_task_kfunc_skel();
> +	if (!ASSERT_OK_PTR(skel, "open_load_skel"))
> +		return;
> +
> +	if (!ASSERT_OK(skel->bss->err, "pre_spawn_err"))
> +		goto cleanup;
> +
> +	prog = bpf_object__find_program_by_name(skel->obj, prog_name);
> +	if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
> +		goto cleanup;
> +
> +	link = bpf_program__attach(prog);
> +	if (!ASSERT_OK_PTR(link, "attached_link"))
> +		goto cleanup;
> +
> +	child_pid = fork();
> +	if (!ASSERT_GT(child_pid, -1, "child_pid"))
> +		goto cleanup;
> +	if (child_pid == 0)
> +		_exit(0);
> +	waitpid(child_pid, &status, 0);
> +
> +	ASSERT_OK(skel->bss->err, "post_wait_err");
> +
> +cleanup:
> +	bpf_link__destroy(link);
> +	task_kfunc_success__destroy(skel);
> +}
> +
> +static const char * const success_tests[] = {
> +	"test_task_acquire_release",
> +	"test_task_acquire_leave_in_map",
> +	"test_task_xchg_release",
> +	"test_task_get_release",
> +};
> +
> +static struct {
> +	const char *prog_name;
> +	const char *expected_err_msg;
> +} failure_tests[] = {
> +	{"task_kfunc_acquire_untrusted", "arg#0 pointer type STRUCT task_struct must point"},
> +	{"task_kfunc_acquire_null", "arg#0 pointer type STRUCT task_struct must point"},
> +	{"task_kfunc_acquire_unreleased", "Unreleased reference"},
> +	{"task_kfunc_get_non_kptr_param", "arg#0 expected pointer to map value"},
> +	{"task_kfunc_get_non_kptr_acquired", "arg#0 expected pointer to map value"},
> +	{"task_kfunc_get_null", "arg#0 expected pointer to map value"},
> +	{"task_kfunc_xchg_unreleased", "Unreleased reference"},
> +	{"task_kfunc_get_unreleased", "Unreleased reference"},
> +	{"task_kfunc_release_untrusted", "arg#0 pointer type STRUCT task_struct must point"},
> +	{"task_kfunc_release_null", "arg#0 pointer type STRUCT task_struct must point"},
> +	{"task_kfunc_release_unacquired", "R1 must be referenced"},
> +};
> +
> +static void verify_fail(const char *prog_name, const char *expected_err_msg)
> +{
> +	LIBBPF_OPTS(bpf_object_open_opts, opts);
> +	struct task_kfunc_failure *skel;
> +	int err, i;
> +
> +	opts.kernel_log_buf = obj_log_buf;
> +	opts.kernel_log_size = log_buf_sz;
> +	opts.kernel_log_level = 1;
> +
> +	skel = task_kfunc_failure__open_opts(&opts);
> +	if (!ASSERT_OK_PTR(skel, "task_kfunc_failure__open_opts"))
> +		goto cleanup;
> +
> +	skel->bss->pid = getpid();
> +
> +	for (i = 0; i < ARRAY_SIZE(failure_tests); i++) {
> +		struct bpf_program *prog;
> +		const char *curr_name = failure_tests[i].prog_name;
> +
> +		prog = bpf_object__find_program_by_name(skel->obj, curr_name);
> +		if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name"))
"bpf_object__find_program_by_name")) > + goto cleanup; > + > + bpf_program__set_autoload(prog, !strcmp(curr_name, prog_name)); > + } > + > + err = task_kfunc_failure__load(skel); > + if (!ASSERT_ERR(err, "unexpected load success")) > + goto cleanup; > + > + if (!ASSERT_OK_PTR(strstr(obj_log_buf, expected_err_msg), "expected_err_msg")) { > + fprintf(stderr, "Expected err_msg: %s\n", expected_err_msg); > + fprintf(stderr, "Verifier output: %s\n", obj_log_buf); > + } > + > +cleanup: > + task_kfunc_failure__destroy(skel); > +} > + > +void test_task_kfunc(void) > +{ > + int i; > + > + for (i = 0; i < ARRAY_SIZE(success_tests); i++) { > + if (!test__start_subtest(success_tests[i])) > + continue; > + > + run_success_test(success_tests[i]); > + } > + > + for (i = 0; i < ARRAY_SIZE(failure_tests); i++) { > + if (!test__start_subtest(failure_tests[i].prog_name)) > + continue; > + > + verify_fail(failure_tests[i].prog_name, failure_tests[i].expected_err_msg); > + } > +} > diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_common.h b/tools/testing/selftests/bpf/progs/task_kfunc_common.h > new file mode 100644 > index 000000000000..bbb0a40572fd > --- /dev/null > +++ b/tools/testing/selftests/bpf/progs/task_kfunc_common.h > @@ -0,0 +1,83 @@ > +/* SPDX-License-Identifier: GPL-2.0 */ > +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ > + > +#ifndef _TASK_KFUNC_COMMON_H > +#define _TASK_KFUNC_COMMON_H > + > +#include <errno.h> > +#include <vmlinux.h> > +#include <bpf/bpf_tracing.h> > +#include <bpf/bpf_helpers.h> > + > +struct __tasks_kfunc_map_value { > + struct task_struct __kptr_ref * task; > +}; > + > +struct hash_map { > + __uint(type, BPF_MAP_TYPE_HASH); > + __type(key, int); > + __type(value, struct __tasks_kfunc_map_value); > + __uint(max_entries, 1); > +} __tasks_kfunc_map SEC(".maps"); > + > +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; > +struct task_struct *bpf_task_kptr_get(struct task_struct **pp) __ksym; > +void bpf_task_release(struct task_struct *p) __ksym; > + > +#define TEST_NAME_SZ 128 > + > +/* The pid of the test process used to determine if a newly created task is the test task. 
> +int pid;
> +
> +static inline struct __tasks_kfunc_map_value *tasks_kfunc_map_value_lookup(struct task_struct *p)
> +{
> +	s32 pid;
> +	long status;
> +
> +	status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
> +	if (status)
> +		return NULL;
> +
> +	return bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
> +}
> +
> +static inline int tasks_kfunc_map_insert(struct task_struct *p)
> +{
> +	struct __tasks_kfunc_map_value local, *v;
> +	long status;
> +	struct task_struct *acquired, *old;
> +	s32 pid;
> +
> +	status = bpf_probe_read_kernel(&pid, sizeof(pid), &p->pid);
> +	if (status)
> +		return status;
> +
> +	local.task = NULL;
> +	status = bpf_map_update_elem(&__tasks_kfunc_map, &pid, &local, BPF_NOEXIST);
> +	if (status)
> +		return status;
> +
> +	v = bpf_map_lookup_elem(&__tasks_kfunc_map, &pid);
> +	if (!v) {
> +		bpf_map_delete_elem(&__tasks_kfunc_map, &pid);
> +		return status;
> +	}
> +
> +	acquired = bpf_task_acquire(p);
> +	old = bpf_kptr_xchg(&v->task, acquired);
> +	if (old) {
> +		bpf_task_release(old);
> +		return -EEXIST;
> +	}
> +
> +	return 0;
> +}
> +
> +static inline bool is_test_kfunc_task(struct task_struct *task)
> +{
> +	int cur_pid = bpf_get_current_pid_tgid() >> 32;
> +
> +	return pid == cur_pid;
> +}
> +
> +#endif /* _TASK_KFUNC_COMMON_H */
> diff --git a/tools/testing/selftests/bpf/progs/task_kfunc_failure.c b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
> new file mode 100644
> index 000000000000..4cf01bbc8a16
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/task_kfunc_failure.c
> @@ -0,0 +1,225 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
> +
> +#include <vmlinux.h>
> +#include <bpf/bpf_tracing.h>
> +#include <bpf/bpf_helpers.h>
> +
> +#include "task_kfunc_common.h"
> +
> +char _license[] SEC("license") = "GPL";
> +
> +/* Prototype for all of the program trace events below:
> + *
> + * TRACE_EVENT(task_newtask,
> + *	TP_PROTO(struct task_struct *p, u64 clone_flags)
> + */
> +
> +SEC("tp_btf/task_newtask")
> +int BPF_PROG(task_kfunc_acquire_untrusted, struct task_struct *task, u64 clone_flags)
> +{
> +	struct task_struct *acquired, *stack_ptr;
> +
> +	if (!is_test_kfunc_task(task))
> +		return 0;
> +
> +	/* Can't invoke bpf_task_acquire() on an untrusted, random pointer. */
> +	stack_ptr = (struct task_struct *)0xcafef00d;

This seems like a misleading comment. 'stack_ptr' would just be a scalar
here, not a pointer. Maybe you should be testing this with an actual
PTR_UNTRUSTED pointer instead. Loading a __kptr tagged pointer from the
map value would be a good way to get one (rough sketch below, after the
quoted context). Very soon a lot of other pointers obtained from pointer
walking are going to be marked PTR_UNTRUSTED as well, so that would also
cover those cases, similar to this test.

Also, could you include a test to make sure sleepable programs cannot
call bpf_task_acquire()? It seems to assume the RCU read lock is held,
which may not be true in a sleepable program. If that is already not
possible, maybe add a WARN_ON_ONCE inside the helper to ensure future
cases don't creep in.

> [...]
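To make the first suggestion concrete, something along these lines is what
I have in mind (untested sketch; the program name, the reuse of the
tasks_kfunc_map_value_lookup() helper from the common header, and the
assumption that a direct load from the __kptr_ref field is marked
PTR_UNTRUSTED are all mine):

SEC("tp_btf/task_newtask")
int BPF_PROG(task_kfunc_acquire_untrusted_kptr, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *kptr, *acquired;
	struct __tasks_kfunc_map_value *v;

	if (!is_test_kfunc_task(task))
		return 0;

	v = tasks_kfunc_map_value_lookup(task);
	if (!v)
		return 0;

	/* A direct load of a __kptr_ref field should yield an untrusted
	 * pointer, so the verifier should reject passing it to
	 * bpf_task_acquire().
	 */
	kptr = v->task;
	if (!kptr)
		return 0;

	acquired = bpf_task_acquire(kptr);
	bpf_task_release(acquired);

	return 0;
}

For the sleepable case, assuming an lsm.s hook such as file_open and
bpf_get_current_task_btf() are usable here, the shape of the test could be
something like this (again just a sketch, not something I have run):

SEC("lsm.s/file_open")
int BPF_PROG(task_kfunc_acquire_sleepable, struct file *file)
{
	struct task_struct *acquired;

	/* A sleepable program is not guaranteed to be in an RCU read-side
	 * critical section, so acquiring a task reference here should be
	 * rejected (or at least warn).
	 */
	acquired = bpf_task_acquire(bpf_get_current_task_btf());
	bpf_task_release(acquired);

	return 0;
}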