Allows struct_ops programs to acquire referenced kptrs from arguments
by directly reading the argument.

The verifier will automatically acquire a reference for struct_ops
arguments tagged with "__ref" in the stub function. The user will be
able to access the referenced kptr directly by reading the context, as
long as it has not been released by the program.

This new mechanism to acquire referenced kptrs (compared with the
existing "kfunc with KF_ACQUIRE") is introduced for ergonomic and
semantic reasons. In the first use case, Qdisc_ops, an skb is passed
to .enqueue in the first argument. The qdisc becomes the sole owner of
the skb and must enqueue or drop it. Representing skbs in bpf qdisc as
referenced kptrs ensures that 1) the qdisc will always enqueue or drop
the skb in .enqueue, and 2) the qdisc cannot make up invalid skb
pointers in .dequeue. The new mechanism provides a natural way for
users to get a referenced kptr in struct_ops programs.

More importantly, we would also like to make sure that there is only a
single reference to the same skb in a qdisc. Since skb->rbnode will be
used in the future to support adding skbs to bpf lists and rbtrees,
allowing multiple references may lead to racy accesses to this field
when the user adds references of the same skb to different bpf graphs.
The new mechanism provides a better way to enforce this unique-pointer
semantic than forbidding users from calling a KF_ACQUIRE kfunc
multiple times.

Signed-off-by: Amery Hung <amery.hung@xxxxxxxxxxxxx>
---
 include/linux/bpf.h         |  3 +++
 kernel/bpf/bpf_struct_ops.c | 26 ++++++++++++++++++++------
 kernel/bpf/btf.c            |  1 +
 kernel/bpf/verifier.c       | 34 +++++++++++++++++++++++++++++++---
 4 files changed, 55 insertions(+), 9 deletions(-)
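Usage sketch (note for reviewers, not part of the commit): the names on
the qdisc side below are hypothetical placeholders -- the stub
bpf_qdisc_enqueue_stub() and the releasing kfuncs bpf_skb_enqueue() and
bpf_skb_drop() stand in for whatever a subsystem would provide; only
the "__ref" suffix handling itself comes from this patch.

A subsystem opts in by appending the "__ref" suffix to an argument name
in its stub function, which prepare_arg_info() matches against
REFCOUNTED_SUFFIX and marks as a refcounted context argument:

	/* Kernel side (illustrative) */
	static int bpf_qdisc_enqueue_stub(struct sk_buff *skb__ref,
					  struct Qdisc *sch)
	{
		return 0;
	}

A struct_ops program then obtains the referenced kptr simply by reading
the tagged argument from the context. Since do_check_common()
pre-acquires the reference, the program must release it exactly once on
every path, and any context read of the argument after the release is
rejected with "bpf_context off=... ref_obj_id=... is no longer valid":

	/* BPF side (illustrative) */
	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	/* Hypothetical releasing kfuncs; a real subsystem would provide
	 * its own enqueue/drop operations that consume the reference. */
	extern int bpf_skb_enqueue(struct sk_buff *skb) __ksym;
	extern int bpf_skb_drop(struct sk_buff *skb) __ksym;

	char _license[] SEC("license") = "GPL";

	SEC("struct_ops/enqueue")
	int BPF_PROG(my_enqueue, struct sk_buff *skb, struct Qdisc *sch)
	{
		/* skb is a referenced kptr read straight from the
		 * context; both paths below must release it. */
		if (skb->len > 1500)
			return bpf_skb_drop(skb);
		return bpf_skb_enqueue(skb);
	}

Reading the argument through the context is what makes the
unique-reference property enforceable: the verifier hands out exactly
one reference at program entry and invalidates further context reads of
that argument once it has been released.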
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index cc460786da9b..3891e45ded4e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -924,6 +924,7 @@ struct bpf_insn_access_aux {
 		struct {
 			struct btf *btf;
 			u32 btf_id;
+			u32 ref_obj_id;
 		};
 	};
 	struct bpf_verifier_log *log; /* for verbose logs */
@@ -1427,6 +1428,8 @@ struct bpf_ctx_arg_aux {
 	enum bpf_reg_type reg_type;
 	struct btf *btf;
 	u32 btf_id;
+	u32 ref_obj_id;
+	bool refcounted;
 };
 
 struct btf_mod_pair {
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 0d515ec57aa5..05f16f21981e 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -145,6 +145,7 @@ void bpf_struct_ops_image_free(void *image)
 }
 
 #define MAYBE_NULL_SUFFIX "__nullable"
+#define REFCOUNTED_SUFFIX "__ref"
 #define MAX_STUB_NAME 128
 
 /* Return the type info of a stub function, if it exists.
@@ -206,9 +207,11 @@ static int prepare_arg_info(struct btf *btf,
 			    struct bpf_struct_ops_arg_info *arg_info)
 {
 	const struct btf_type *stub_func_proto, *pointed_type;
+	bool is_nullable = false, is_refcounted = false;
 	const struct btf_param *stub_args, *args;
 	struct bpf_ctx_arg_aux *info, *info_buf;
 	u32 nargs, arg_no, info_cnt = 0;
+	const char *suffix;
 	u32 arg_btf_id;
 	int offset;
 
@@ -240,12 +243,19 @@ static int prepare_arg_info(struct btf *btf,
 	info = info_buf;
 	for (arg_no = 0; arg_no < nargs; arg_no++) {
 		/* Skip arguments that is not suffixed with
-		 * "__nullable".
+		 * "__nullable" or "__ref".
 		 */
-		if (!btf_param_match_suffix(btf, &stub_args[arg_no],
-					    MAYBE_NULL_SUFFIX))
+		is_nullable = btf_param_match_suffix(btf, &stub_args[arg_no],
+						     MAYBE_NULL_SUFFIX);
+		is_refcounted = btf_param_match_suffix(btf, &stub_args[arg_no],
+						       REFCOUNTED_SUFFIX);
+		if (!is_nullable && !is_refcounted)
 			continue;
 
+		if (is_nullable)
+			suffix = MAYBE_NULL_SUFFIX;
+		else if (is_refcounted)
+			suffix = REFCOUNTED_SUFFIX;
+
 		/* Should be a pointer to struct */
 		pointed_type = btf_type_resolve_ptr(btf, args[arg_no].type,
@@ -253,7 +263,7 @@ static int prepare_arg_info(struct btf *btf,
 		if (!pointed_type ||
 		    !btf_type_is_struct(pointed_type)) {
 			pr_warn("stub function %s__%s has %s tagging to an unsupported type\n",
-				st_ops_name, member_name, MAYBE_NULL_SUFFIX);
+				st_ops_name, member_name, suffix);
 			goto err_out;
 		}
 
@@ -271,11 +281,15 @@ static int prepare_arg_info(struct btf *btf,
 		}
 
 		/* Fill the information of the new argument */
-		info->reg_type =
-			PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
 		info->btf_id = arg_btf_id;
 		info->btf = btf;
 		info->offset = offset;
+		if (is_nullable) {
+			info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID | PTR_MAYBE_NULL;
+		} else if (is_refcounted) {
+			info->reg_type = PTR_TRUSTED | PTR_TO_BTF_ID;
+			info->refcounted = true;
+		}
 
 		info++;
 		info_cnt++;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index de15e8b12fae..52be35b30308 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -6516,6 +6516,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 			info->reg_type = ctx_arg_info->reg_type;
 			info->btf = ctx_arg_info->btf ? : btf_vmlinux;
 			info->btf_id = ctx_arg_info->btf_id;
+			info->ref_obj_id = ctx_arg_info->ref_obj_id;
 			return true;
 		}
 	}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 37053cc4defe..f614ab283c37 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1367,6 +1367,17 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id)
 	return -EINVAL;
 }
 
+static bool find_reference_state(struct bpf_func_state *state, int ptr_id)
+{
+	int i;
+
+	for (i = 0; i < state->acquired_refs; i++)
+		if (state->refs[i].id == ptr_id)
+			return true;
+
+	return false;
+}
+
 static void free_func_state(struct bpf_func_state *state)
 {
 	if (!state)
@@ -5587,7 +5598,7 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 
 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
 			    enum bpf_access_type t, enum bpf_reg_type *reg_type,
-			    struct btf **btf, u32 *btf_id)
+			    struct btf **btf, u32 *btf_id, u32 *ref_obj_id)
 {
 	struct bpf_insn_access_aux info = {
 		.reg_type = *reg_type,
@@ -5606,8 +5617,16 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
 		*reg_type = info.reg_type;
 
 		if (base_type(*reg_type) == PTR_TO_BTF_ID) {
+			if (info.ref_obj_id &&
+			    !find_reference_state(cur_func(env), info.ref_obj_id)) {
+				verbose(env, "bpf_context off=%d ref_obj_id=%d is no longer valid\n",
+					off, info.ref_obj_id);
+				return -EACCES;
+			}
+
 			*btf = info.btf;
 			*btf_id = info.btf_id;
+			*ref_obj_id = info.ref_obj_id;
 		} else {
 			env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
 		}
@@ -6878,7 +6897,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 	} else if (reg->type == PTR_TO_CTX) {
 		enum bpf_reg_type reg_type = SCALAR_VALUE;
 		struct btf *btf = NULL;
-		u32 btf_id = 0;
+		u32 btf_id = 0, ref_obj_id = 0;
 
 		if (t == BPF_WRITE && value_regno >= 0 &&
 		    is_pointer_value(env, value_regno)) {
@@ -6891,7 +6910,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			return err;
 
 		err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf,
-				       &btf_id);
+				       &btf_id, &ref_obj_id);
 		if (err)
 			verbose_linfo(env, insn_idx, "; ");
 		if (!err && t == BPF_READ && value_regno >= 0) {
@@ -6915,6 +6934,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 				if (base_type(reg_type) == PTR_TO_BTF_ID) {
 					regs[value_regno].btf = btf;
 					regs[value_regno].btf_id = btf_id;
+					regs[value_regno].ref_obj_id = ref_obj_id;
 				}
 			}
 			regs[value_regno].type = reg_type;
@@ -20897,6 +20917,7 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
 {
 	bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
 	struct bpf_subprog_info *sub = subprog_info(env, subprog);
+	struct bpf_ctx_arg_aux *ctx_arg_info;
 	struct bpf_verifier_state *state;
 	struct bpf_reg_state *regs;
 	int ret, i;
@@ -21004,6 +21025,13 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
 		mark_reg_known_zero(env, regs, BPF_REG_1);
 	}
 
+	if (env->prog->type == BPF_PROG_TYPE_STRUCT_OPS) {
+		ctx_arg_info = (struct bpf_ctx_arg_aux *)env->prog->aux->ctx_arg_info;
+		for (i = 0; i < env->prog->aux->ctx_arg_info_size; i++)
+			if (ctx_arg_info[i].refcounted)
+				ctx_arg_info[i].ref_obj_id = acquire_reference_state(env, 0);
+	}
+
 	ret = do_check(env);
 out:
 	/* check for NULL is necessary, since cur_state can be freed inside
-- 
2.20.1