Re: [PATCH bpf-next v2 3/4] bpf: Add a kfunc for generic type cast

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Sun, Nov 20, 2022 at 08:15:27AM -0800, Yonghong Song wrote:
> Implement bpf_rdonly_cast() which tries to cast the object
> to a specified type. This tries to support use case like below:
>   #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
> where skb_end_pointer(SKB) is an 'unsigned char *' and needs to
> be cast to 'struct skb_shared_info *'.
> 
> The signature of bpf_rdonly_cast() looks like
>    void *bpf_rdonly_cast(void *obj, __u32 btf_id)
> The function returns the same 'obj' but with PTR_TO_BTF_ID with
> btf_id. The verifier will ensure btf_id being a struct type.
> 
> Since the supported type cast may not reflect what the 'obj'
> represents, the returned btf_id is marked as PTR_UNTRUSTED, so
> the return value and subsequent pointer chasing cannot be
> used as helper/kfunc arguments.
> 
> Signed-off-by: Yonghong Song <yhs@xxxxxx>
> ---
>  kernel/bpf/helpers.c  |  6 ++++++
>  kernel/bpf/verifier.c | 26 ++++++++++++++++++++++++--
>  2 files changed, 30 insertions(+), 2 deletions(-)
> 
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index dc6e994feeb9..9d9b91d2d047 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -1829,6 +1829,11 @@ void *bpf_cast_to_kern_ctx(void *obj)
>  	return obj;
>  }
>  
> +void *bpf_rdonly_cast(void *obj__ign, u32 btf_id__k)
> +{
> +	return obj__ign;
> +}
> +
>  __diag_pop();
>  
>  BTF_SET8_START(generic_btf_ids)
> @@ -1850,6 +1855,7 @@ static const struct btf_kfunc_id_set generic_kfunc_set = {
>  
>  BTF_SET8_START(common_btf_ids)
>  BTF_ID_FLAGS(func, bpf_cast_to_kern_ctx)
> +BTF_ID_FLAGS(func, bpf_rdonly_cast)
>  BTF_SET8_END(common_btf_ids)
>  
>  static const struct btf_kfunc_id_set common_kfunc_set = {
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index a18b519c5225..3f1094efdb04 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -8119,6 +8119,7 @@ enum special_kfunc_type {
>  	KF_bpf_list_pop_front,
>  	KF_bpf_list_pop_back,
>  	KF_bpf_cast_to_kern_ctx,
> +	KF_bpf_rdonly_cast,
>  };
>  
>  BTF_SET_START(special_kfunc_set)
> @@ -8129,6 +8130,7 @@ BTF_ID(func, bpf_list_push_back)
>  BTF_ID(func, bpf_list_pop_front)
>  BTF_ID(func, bpf_list_pop_back)
>  BTF_ID(func, bpf_cast_to_kern_ctx)
> +BTF_ID(func, bpf_rdonly_cast)
>  BTF_SET_END(special_kfunc_set)
>  
>  BTF_ID_LIST(special_kfunc_list)
> @@ -8139,6 +8141,7 @@ BTF_ID(func, bpf_list_push_back)
>  BTF_ID(func, bpf_list_pop_front)
>  BTF_ID(func, bpf_list_pop_back)
>  BTF_ID(func, bpf_cast_to_kern_ctx)
> +BTF_ID(func, bpf_rdonly_cast)
>  
>  static enum kfunc_ptr_arg_type
>  get_kfunc_ptr_arg_type(struct bpf_verifier_env *env,
> @@ -8769,6 +8772,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>  	u32 i, nargs, func_id, ptr_type_id;
>  	int err, insn_idx = *insn_idx_p;
>  	const struct btf_param *args;
> +	const struct btf_type *ret_t;
>  	struct btf *desc_btf;
>  	u32 *kfunc_flags;
>  
> @@ -8848,7 +8852,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>  
>  		if (meta.btf == btf_vmlinux && btf_id_set_contains(&special_kfunc_set, meta.func_id)) {
>  			if (meta.func_id == special_kfunc_list[KF_bpf_obj_new_impl]) {
> -				const struct btf_type *ret_t;
>  				struct btf *ret_btf;
>  				u32 ret_btf_id;
>  
> @@ -8898,6 +8901,24 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>  				regs[BPF_REG_0].type = PTR_TO_BTF_ID;
>  				regs[BPF_REG_0].btf = desc_btf;
>  				regs[BPF_REG_0].btf_id = meta.arg_constant.value;
> +			} else if (meta.func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
> +				if (!capable(CAP_PERFMON)) {
> +					verbose(env,
> +						"kfunc bpf_rdonly_cast requires CAP_PERFMON capability\n");
> +					return -EACCES;
> +				}
> +
> +				ret_t = btf_type_by_id(desc_btf, meta.arg_constant.value);
> +				if (!ret_t || !btf_type_is_struct(ret_t)) {
> +					verbose(env,
> +						"kfunc bpf_rdonly_cast type ID argument must be of a struct\n");
> +					return -EINVAL;
> +				}
> +
> +				mark_reg_known_zero(env, regs, BPF_REG_0);
> +				regs[BPF_REG_0].type = PTR_TO_BTF_ID | PTR_UNTRUSTED;
> +				regs[BPF_REG_0].btf = desc_btf;
> +				regs[BPF_REG_0].btf_id = meta.arg_constant.value;
>  			} else {
>  				verbose(env, "kernel function %s unhandled dynamic return type\n",
>  					meta.func_name);
> @@ -15148,7 +15169,8 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
>  		insn_buf[1] = addr[1];
>  		insn_buf[2] = *insn;
>  		*cnt = 3;
> -	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx]) {
> +	} else if (desc->func_id == special_kfunc_list[KF_bpf_cast_to_kern_ctx] ||
> +		   desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) {
>  		insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1);
>  		*cnt = 1;

Nice!
After kfunc refactoring adding new special kfunc looks so clean and easy to review.

I was contemplating suggesting that we replace "__k" with "__btf_struct"
to have a single place that checks for btf_type_is_struct(),
but then realized that bpf_obj_new needs prog's btf_id
while bpf_rdonly_cast needs vmlinux's btf_id.
So let's keep __k for now.



[Index of Archives]     [Linux Samsung SoC]     [Linux Rockchip SoC]     [Linux Actions SoC]     [Linux for Synopsys ARC Processors]     [Linux NFS]     [Linux NILFS]     [Linux USB Devel]     [Video for Linux]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]


  Powered by Linux