Not all structures in the kernel contain a reference count; struct socket,
for example, keeps its reference count in struct file. For such structures
it makes no sense to use a combination of KF_ACQUIRE and KF_RELEASE to
trick the verifier into treating a pointer to struct socket as valid.

This patch adds the KF_OBTAIN flag for cases where a valid pointer can be
obtained but there is no reference count to manipulate (e.g. the structure
itself has no reference count because the actual reference count lives in
another structure).

For KF_OBTAIN kfuncs, the passed arguments must be valid pointers.

A KF_OBTAIN kfunc guarantees that if the pointer passed in is valid, then
the pointer it returns is also valid. For example, bpf_socket_from_file()
is a KF_OBTAIN kfunc: if the struct file pointer passed in is valid, then
the struct socket pointer returned is also valid.

KF_OBTAIN kfuncs use ref_obj_id to tie the returned pointer to the
ownership and lifetime of the argument. For example, if we pass pointer A
to a KF_OBTAIN kfunc and get pointer B back, then once pointer A is
released, pointer B becomes invalid.

Signed-off-by: Juntong Deng <juntong.deng@xxxxxxxxxxx>
---
 include/linux/btf.h   |  1 +
 kernel/bpf/verifier.c | 14 +++++++++++++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/include/linux/btf.h b/include/linux/btf.h
index cffb43133c68..85e7bf9f4410 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -75,6 +75,7 @@
 #define KF_ITER_NEXT     (1 << 9)  /* kfunc implements BPF iter next method */
 #define KF_ITER_DESTROY  (1 << 10) /* kfunc implements BPF iter destructor */
 #define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */
+#define KF_OBTAIN        (1 << 12) /* kfunc is an obtain function */
 
 /*
  * Tag marking a kernel function as a kfunc. This is meant to minimize the
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index ebec74c28ae3..fc812d954188 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -10972,9 +10972,15 @@ static bool is_kfunc_release(struct bpf_kfunc_call_arg_meta *meta)
 	return meta->kfunc_flags & KF_RELEASE;
 }
 
+static bool is_kfunc_obtain(struct bpf_kfunc_call_arg_meta *meta)
+{
+	return meta->kfunc_flags & KF_OBTAIN;
+}
+
 static bool is_kfunc_trusted_args(struct bpf_kfunc_call_arg_meta *meta)
 {
-	return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta);
+	return (meta->kfunc_flags & KF_TRUSTED_ARGS) || is_kfunc_release(meta) ||
+	       is_kfunc_obtain(meta);
 }
 
 static bool is_kfunc_sleepable(struct bpf_kfunc_call_arg_meta *meta)
@@ -12832,6 +12838,12 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 		/* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
 		regs[BPF_REG_0].id = ++env->id_gen;
 	}
+
+	if (is_kfunc_obtain(&meta)) {
+		regs[BPF_REG_0].type |= PTR_TRUSTED;
+		regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+	}
+
 	mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
 	if (is_kfunc_acquire(&meta)) {
 		int id = acquire_reference_state(env, insn_idx);
-- 
2.39.2
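
For reference, below is a minimal sketch (not part of this patch) of how a
kfunc such as bpf_socket_from_file() might be implemented and registered
with KF_OBTAIN. The use of sock_from_file() and the id-set name are
assumptions made for illustration only:

    #include <linux/btf.h>
    #include <linux/btf_ids.h>
    #include <net/sock.h>

    /* Sketch only: a KF_OBTAIN kfunc that derives a struct socket pointer
     * from a struct file without touching any reference count.
     */
    __bpf_kfunc struct socket *bpf_socket_from_file(struct file *file)
    {
    	/* No reference is taken here; the returned socket stays valid
    	 * only as long as the struct file argument is valid, which the
    	 * verifier enforces by propagating ref_obj_id.
    	 */
    	return sock_from_file(file);
    }

    BTF_KFUNCS_START(obtain_kfunc_ids)
    BTF_ID_FLAGS(func, bpf_socket_from_file, KF_OBTAIN | KF_RET_NULL)
    BTF_KFUNCS_END(obtain_kfunc_ids)

    static const struct btf_kfunc_id_set obtain_kfunc_set = {
    	.owner = THIS_MODULE,
    	.set   = &obtain_kfunc_ids,
    };

The id set would then be made available to the relevant program types via
register_btf_kfunc_id_set(). From the BPF program's point of view, the
struct file argument would typically come from an already trusted source,
and the returned struct socket inherits that argument's ref_obj_id, so it
becomes invalid as soon as the file reference is released.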