ops.dispatch() and ops.yield() may be fed a NULL task_struct pointer.
set_arg_maybe_null() is used to tell the verifier that they should be
NULL-checked before being dereferenced. BPF now has a much prettier way to
express this - tagging arguments in CFI stubs with __nullable. Replace
set_arg_maybe_null() with __nullable CFI stub tags.

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Cc: Martin KaFai Lau <martin.lau@xxxxxxxxxx>
Cc: Alexei Starovoitov <ast@xxxxxxxxxx>
---
 kernel/sched/ext.c | 66 +-----------------------------------------------------
 1 file changed, 2 insertions(+), 64 deletions(-)

--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -5407,67 +5407,8 @@ err_disable:
 #include <linux/bpf.h>
 #include <linux/btf.h>
 
-extern struct btf *btf_vmlinux;
 static const struct btf_type *task_struct_type;
 
-static bool set_arg_maybe_null(const char *op, int arg_n, int off, int size,
-			       enum bpf_access_type type,
-			       const struct bpf_prog *prog,
-			       struct bpf_insn_access_aux *info)
-{
-	struct btf *btf = bpf_get_btf_vmlinux();
-	const struct bpf_struct_ops_desc *st_ops_desc;
-	const struct btf_member *member;
-	const struct btf_type *t;
-	u32 btf_id, member_idx;
-	const char *mname;
-
-	/* struct_ops op args are all sequential, 64-bit numbers */
-	if (off != arg_n * sizeof(__u64))
-		return false;
-
-	/* btf_id should be the type id of struct sched_ext_ops */
-	btf_id = prog->aux->attach_btf_id;
-	st_ops_desc = bpf_struct_ops_find(btf, btf_id);
-	if (!st_ops_desc)
-		return false;
-
-	/* BTF type of struct sched_ext_ops */
-	t = st_ops_desc->type;
-
-	member_idx = prog->expected_attach_type;
-	if (member_idx >= btf_type_vlen(t))
-		return false;
-
-	/*
-	 * Get the member name of this struct_ops program, which corresponds to
-	 * a field in struct sched_ext_ops. For example, the member name of the
-	 * dispatch struct_ops program (callback) is "dispatch".
-	 */
-	member = &btf_type_member(t)[member_idx];
-	mname = btf_name_by_offset(btf_vmlinux, member->name_off);
-
-	if (!strcmp(mname, op)) {
-		/*
-		 * The value is a pointer to a type (struct task_struct) given
-		 * by a BTF ID (PTR_TO_BTF_ID). It is trusted (PTR_TRUSTED),
-		 * however, can be a NULL (PTR_MAYBE_NULL). The BPF program
-		 * should check the pointer to make sure it is not NULL before
-		 * using it, or the verifier will reject the program.
-		 *
-		 * Longer term, this is something that should be addressed by
-		 * BTF, and be fully contained within the verifier.
-		 */
-		info->reg_type = PTR_MAYBE_NULL | PTR_TO_BTF_ID | PTR_TRUSTED;
-		info->btf = btf_vmlinux;
-		info->btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
-
-		return true;
-	}
-
-	return false;
-}
-
 static bool bpf_scx_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     const struct bpf_prog *prog,
@@ -5475,9 +5416,6 @@ static bool bpf_scx_is_valid_access(int
 {
	if (type != BPF_READ)
		return false;
-	if (set_arg_maybe_null("dispatch", 1, off, size, type, prog, info) ||
-	    set_arg_maybe_null("yield", 1, off, size, type, prog, info))
-		return true;
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (off % size != 0)
@@ -5637,13 +5575,13 @@ static int bpf_scx_validate(void *kdata)
 static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
 static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
 static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
-static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *p) {}
+static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *prev__nullable) {}
 static void sched_ext_ops__tick(struct task_struct *p) {}
 static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
 static void sched_ext_ops__running(struct task_struct *p) {}
 static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
 static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
-static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to) { return false; }
+static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to__nullable) { return false; }
 static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
 static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
 static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
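
For illustration only, not part of the patch: with the stub arguments tagged
__nullable, a BPF scheduler still has to NULL-check them before dereferencing,
otherwise the verifier rejects the program. A minimal sketch of such a check in
an ops.dispatch() implementation follows. It assumes the BPF_STRUCT_OPS() macro
and the scx_bpf_dispatch() kfunc / SCX_* constants from the sched_ext BPF
tooling headers; "example_dispatch" and the dispatch policy shown are
hypothetical, not taken from an in-tree scheduler.

	#include <scx/common.bpf.h>

	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
	{
		/*
		 * @prev corresponds to the __nullable-tagged argument and may
		 * be NULL. Dereferencing it without this check makes the
		 * verifier reject the program.
		 */
		if (prev && (prev->scx.flags & SCX_TASK_QUEUED))
			scx_bpf_dispatch(prev, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
	}

The same requirement applies to the second argument of ops.yield().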