The current bpf_verifier_ops->btf_struct_access() callback walks the
struct member access chain and gathers information such as next_btf_id
and bpf_type_flag (for __user and __percpu). Later patches will gather
additional information as well, for example whether the pointer is
tagged with __rcu. So refactor the btf_struct_access() interface to
wrap next_btf_id and bpf_type_flag in a structure, so that new fields
can be added to the structure without modifying every callback
function.

Signed-off-by: Yonghong Song <yhs@xxxxxx>
---
(A brief usage sketch of the new callback signature follows the diff.)

 include/linux/bpf.h              | 11 ++++++++---
 include/linux/filter.h           |  4 ++--
 kernel/bpf/btf.c                 | 25 ++++++++++++-------------
 kernel/bpf/verifier.c            | 20 ++++++++++----------
 net/bpf/bpf_dummy_struct_ops.c   |  6 ++----
 net/core/filter.c                | 20 ++++++++------------
 net/ipv4/bpf_tcp_ca.c            |  6 ++----
 net/netfilter/nf_conntrack_bpf.c |  3 +--
 8 files changed, 45 insertions(+), 50 deletions(-)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 798aec816970..5011cb50abf1 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -758,6 +758,11 @@ struct bpf_prog_ops {
 			union bpf_attr __user *uattr);
 };
 
+struct btf_struct_access_info {
+	u32 next_btf_id;
+	enum bpf_type_flag flag;
+};
+
 struct bpf_verifier_ops {
 	/* return eBPF function prototype for verification */
 	const struct bpf_func_proto *
@@ -782,7 +787,7 @@ struct bpf_verifier_ops {
 				 const struct btf *btf,
 				 const struct btf_type *t, int off, int size,
 				 enum bpf_access_type atype,
-				 u32 *next_btf_id, enum bpf_type_flag *flag);
+				 struct btf_struct_access_info *info);
 };
 
 struct bpf_prog_offload_ops {
@@ -2070,7 +2075,7 @@ static inline bool bpf_tracing_btf_ctx_access(int off, int size,
 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
 		      const struct btf_type *t, int off, int size,
 		      enum bpf_access_type atype,
-		      u32 *next_btf_id, enum bpf_type_flag *flag);
+		      struct btf_struct_access_info *info);
 bool btf_struct_ids_match(struct bpf_verifier_log *log,
 			  const struct btf *btf, u32 id, int off,
 			  const struct btf *need_btf, u32 need_type_id,
@@ -2323,7 +2328,7 @@ static inline int btf_struct_access(struct bpf_verifier_log *log,
 				    const struct btf *btf,
 				    const struct btf_type *t, int off, int size,
 				    enum bpf_access_type atype,
-				    u32 *next_btf_id, enum bpf_type_flag *flag)
+				    struct btf_struct_access_info *info)
 {
 	return -EACCES;
 }
diff --git a/include/linux/filter.h b/include/linux/filter.h
index efc42a6e3aed..75340a5b83d3 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -570,8 +570,8 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 extern struct mutex nf_conn_btf_access_lock;
 extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log, const struct btf *btf,
 				     const struct btf_type *t, int off, int size,
-				     enum bpf_access_type atype, u32 *next_btf_id,
-				     enum bpf_type_flag *flag);
+				     enum bpf_access_type atype,
+				     struct btf_struct_access_info *info);
 
 typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
 					  const struct bpf_insn *insnsi,
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 5579ff3a5b54..cf16c0ead9f4 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -5639,7 +5639,7 @@ enum bpf_struct_walk_result {
 
 static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
 			   const struct btf_type *t, int off, int size,
-			   u32 *next_btf_id, enum bpf_type_flag *flag)
+			   struct btf_struct_access_info *info)
 {
 	u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
 	const struct btf_type *mtype, *elem_type = NULL;
@@ -5818,7 +5818,7 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
 
 			/* return if the offset matches the member offset */
 			if (off == moff) {
-				*next_btf_id = mid;
+				info->next_btf_id = mid;
 				return WALK_STRUCT;
 			}
 
@@ -5853,8 +5853,8 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
 
 			stype = btf_type_skip_modifiers(btf, mtype->type, &id);
 			if (btf_type_is_struct(stype)) {
-				*next_btf_id = id;
-				*flag = tmp_flag;
+				info->next_btf_id = id;
+				info->flag = tmp_flag;
 				return WALK_PTR;
 			}
 		}
@@ -5881,22 +5881,20 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
 int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
 		      const struct btf_type *t, int off, int size,
 		      enum bpf_access_type atype __maybe_unused,
-		      u32 *next_btf_id, enum bpf_type_flag *flag)
+		      struct btf_struct_access_info *info)
 {
-	enum bpf_type_flag tmp_flag = 0;
+	struct btf_struct_access_info tmp_info = {};
 	int err;
-	u32 id;
 
 	do {
-		err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
+		err = btf_struct_walk(log, btf, t, off, size, &tmp_info);
 
 		switch (err) {
 		case WALK_PTR:
 			/* If we found the pointer or scalar on t+off,
 			 * we're done.
 			 */
-			*next_btf_id = id;
-			*flag = tmp_flag;
+			*info = tmp_info;
 			return PTR_TO_BTF_ID;
 		case WALK_SCALAR:
 			return SCALAR_VALUE;
@@ -5905,7 +5903,7 @@ int btf_struct_access(struct bpf_verifier_log *log, const struct btf *btf,
 			 * by diving in it. At this point the offset is
 			 * aligned with the new type, so set it to 0.
 			 */
-			t = btf_type_by_id(btf, id);
+			t = btf_type_by_id(btf, tmp_info.next_btf_id);
 			off = 0;
 			break;
 		default:
@@ -5942,7 +5940,7 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log,
 			  bool strict)
 {
 	const struct btf_type *type;
-	enum bpf_type_flag flag;
+	struct btf_struct_access_info info = {};
 	int err;
 
 	/* Are we already done? */
@@ -5958,7 +5956,7 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log,
 	type = btf_type_by_id(btf, id);
 	if (!type)
 		return false;
-	err = btf_struct_walk(log, btf, type, off, 1, &id, &flag);
+	err = btf_struct_walk(log, btf, type, off, 1, &info);
 	if (err != WALK_STRUCT)
 		return false;
 
@@ -5967,6 +5965,7 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log,
 	 * continue the search with offset 0 in the new
 	 * type.
 	 */
+	id = info.next_btf_id;
 	if (!btf_types_are_same(btf, id, need_btf, need_type_id)) {
 		off = 0;
 		goto again;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d3b75aa0c54d..4d50f9568245 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4648,8 +4648,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	struct bpf_reg_state *reg = regs + regno;
 	const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
 	const char *tname = btf_name_by_offset(reg->btf, t->name_off);
-	enum bpf_type_flag flag = 0;
-	u32 btf_id;
+	struct btf_struct_access_info info = {};
 	int ret;
 
 	if (off < 0) {
@@ -4684,7 +4683,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 
 	if (env->ops->btf_struct_access) {
 		ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
-						  off, size, atype, &btf_id, &flag);
+						  off, size, atype, &info);
 	} else {
 		if (atype != BPF_READ) {
 			verbose(env, "only read is supported\n");
@@ -4692,7 +4691,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 		}
 
 		ret = btf_struct_access(&env->log, reg->btf, t, off, size,
-					atype, &btf_id, &flag);
+					atype, &info);
 	}
 
 	if (ret < 0)
@@ -4702,10 +4701,11 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	 * also inherit the untrusted flag.
 	 */
 	if (type_flag(reg->type) & PTR_UNTRUSTED)
-		flag |= PTR_UNTRUSTED;
+		info.flag |= PTR_UNTRUSTED;
 
 	if (atype == BPF_READ && value_regno >= 0)
-		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
+		mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, info.next_btf_id,
+				info.flag);
 
 	return 0;
 }
@@ -4717,11 +4717,10 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
 				   int value_regno)
 {
 	struct bpf_reg_state *reg = regs + regno;
+	struct btf_struct_access_info info = {};
 	struct bpf_map *map = reg->map_ptr;
-	enum bpf_type_flag flag = 0;
 	const struct btf_type *t;
 	const char *tname;
-	u32 btf_id;
 	int ret;
 
 	if (!btf_vmlinux) {
@@ -4756,12 +4755,13 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
 		return -EACCES;
 	}
 
-	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id, &flag);
+	ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &info);
 	if (ret < 0)
 		return ret;
 
 	if (value_regno >= 0)
-		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id, flag);
+		mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, info.next_btf_id,
+				info.flag);
 
 	return 0;
 }
diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c
index e78dadfc5829..dd72028ac5b1 100644
--- a/net/bpf/bpf_dummy_struct_ops.c
+++ b/net/bpf/bpf_dummy_struct_ops.c
@@ -159,8 +159,7 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
 					   const struct btf *btf,
 					   const struct btf_type *t, int off,
 					   int size, enum bpf_access_type atype,
-					   u32 *next_btf_id,
-					   enum bpf_type_flag *flag)
+					   struct btf_struct_access_info *info)
 {
 	const struct btf_type *state;
 	s32 type_id;
@@ -177,8 +176,7 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
 		return -EACCES;
 	}
 
-	err = btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
-				flag);
+	err = btf_struct_access(log, btf, t, off, size, atype, info);
 	if (err < 0)
 		return err;
 
diff --git a/net/core/filter.c b/net/core/filter.c
index cb3b635e35be..98f10147c677 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8653,26 +8653,24 @@ EXPORT_SYMBOL_GPL(nf_conn_btf_access_lock);
 
 int (*nfct_btf_struct_access)(struct bpf_verifier_log *log, const struct btf *btf,
 			      const struct btf_type *t, int off, int size,
-			      enum bpf_access_type atype, u32 *next_btf_id,
-			      enum bpf_type_flag *flag);
+			      enum bpf_access_type atype,
+			      struct btf_struct_access_info *info);
 EXPORT_SYMBOL_GPL(nfct_btf_struct_access);
 
 static int tc_cls_act_btf_struct_access(struct bpf_verifier_log *log,
 					const struct btf *btf,
 					const struct btf_type *t, int off,
 					int size, enum bpf_access_type atype,
-					u32 *next_btf_id,
-					enum bpf_type_flag *flag)
+					struct btf_struct_access_info *info)
 {
 	int ret = -EACCES;
 
 	if (atype == BPF_READ)
-		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
-					 flag);
+		return btf_struct_access(log, btf, t, off, size, atype, info);
 
 	mutex_lock(&nf_conn_btf_access_lock);
 	if (nfct_btf_struct_access)
-		ret = nfct_btf_struct_access(log, btf, t, off, size, atype, next_btf_id, flag);
+		ret = nfct_btf_struct_access(log, btf, t, off, size, atype, info);
 	mutex_unlock(&nf_conn_btf_access_lock);
 
 	return ret;
@@ -8741,18 +8739,16 @@ static int xdp_btf_struct_access(struct bpf_verifier_log *log,
 				 const struct btf *btf,
 				 const struct btf_type *t, int off,
 				 int size, enum bpf_access_type atype,
-				 u32 *next_btf_id,
-				 enum bpf_type_flag *flag)
+				 struct btf_struct_access_info *info)
 {
 	int ret = -EACCES;
 
 	if (atype == BPF_READ)
-		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
-					 flag);
+		return btf_struct_access(log, btf, t, off, size, atype, info);
 
 	mutex_lock(&nf_conn_btf_access_lock);
 	if (nfct_btf_struct_access)
-		ret = nfct_btf_struct_access(log, btf, t, off, size, atype, next_btf_id, flag);
+		ret = nfct_btf_struct_access(log, btf, t, off, size, atype, info);
 	mutex_unlock(&nf_conn_btf_access_lock);
 
 	return ret;
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index 6da16ae6a962..dde4ac3f44fd 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -72,14 +72,12 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
 					const struct btf *btf,
 					const struct btf_type *t, int off,
 					int size, enum bpf_access_type atype,
-					u32 *next_btf_id,
-					enum bpf_type_flag *flag)
+					struct btf_struct_access_info *info)
 {
 	size_t end;
 
 	if (atype == BPF_READ)
-		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id,
-					 flag);
+		return btf_struct_access(log, btf, t, off, size, atype, info);
 
 	if (t != tcp_sock_type) {
 		bpf_log(log, "only read is supported\n");
diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c
index 8639e7efd0e2..e09b0dfd8115 100644
--- a/net/netfilter/nf_conntrack_bpf.c
+++ b/net/netfilter/nf_conntrack_bpf.c
@@ -194,8 +194,7 @@ static int _nf_conntrack_btf_struct_access(struct bpf_verifier_log *log,
 					   const struct btf *btf,
 					   const struct btf_type *t, int off,
 					   int size, enum bpf_access_type atype,
-					   u32 *next_btf_id,
-					   enum bpf_type_flag *flag)
+					   struct btf_struct_access_info *info)
 {
 	const struct btf_type *ncit;
 	const struct btf_type *nct;
-- 
2.30.2
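
For reviewers skimming the interface change, below is a minimal,
illustrative sketch (not part of the patch) of what a converted
bpf_verifier_ops callback looks like. foo_btf_struct_access() is a
hypothetical name; the body simply mirrors the pattern of the converted
callbacks above (e.g. tc_cls_act_btf_struct_access()):

static int foo_btf_struct_access(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off, int size,
				 enum bpf_access_type atype,
				 struct btf_struct_access_info *info)
{
	/* Reject writes here if needed; for plain reads just let the
	 * common walker fill info->next_btf_id and info->flag.
	 */
	if (atype != BPF_READ)
		return -EACCES;

	/* When a later patch adds a member to struct btf_struct_access_info
	 * (e.g. an __rcu indicator), neither this prototype nor this call
	 * needs to change.
	 */
	return btf_struct_access(log, btf, t, off, size, atype, info);
}

The point of the wrapper struct is visible here: new outputs of the walk
become new members of struct btf_struct_access_info rather than new
output parameters on every callback.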