This patch eliminates the extra bpf_reg_state memory usage added by the
previous patch, which kept a copy of lock identity in reg state for
non-owning refs.

Instead of copying lock identity around, this patch changes the
non_owning_ref_lock field to a bool, taking advantage of the following:

  * There can currently only be one active lock at a time
  * Non-owning refs are only valid in the critical section

So if a verifier_state has an active_lock, any non-owning ref must have
been obtained under that lock, and any non-owning ref not obtained under
that lock must have been invalidated previously. Therefore, if a
non-owning ref is associated with a lock, it is the active_lock of the
current state, and we can keep a bool "are we associated with the
active_lock of the current state" instead of copying lock identity
around.

Signed-off-by: Dave Marchevsky <davemarchevsky@xxxxxx>
---
 include/linux/bpf_verifier.h |  2 +-
 kernel/bpf/verifier.c        | 20 ++++++++++----------
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 1c6bbde40705..baeb5afb0b81 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -84,7 +84,7 @@ struct bpf_reg_state {
 		struct {
 			struct btf *btf;
 			u32 btf_id;
-			struct bpf_active_lock non_owning_ref_lock;
+			bool non_owning_ref_lock;
 		};
 
 		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4b1883ffcf21..ed816e824928 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -935,9 +935,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
 				verbose_a("id=%d", reg->id);
 			if (reg->ref_obj_id)
 				verbose_a("ref_obj_id=%d", reg->ref_obj_id);
-			if (reg->non_owning_ref_lock.ptr)
-				verbose_a("non_own_id=(%p,%d)", reg->non_owning_ref_lock.ptr,
-					  reg->non_owning_ref_lock.id);
+			if (reg->non_owning_ref_lock)
+				verbose(env, "non_own_ref,");
 			if (t != SCALAR_VALUE)
 				verbose_a("off=%d", reg->off);
 			if (type_is_pkt_pointer(t))
@@ -4834,7 +4833,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
 	}
 
 	if (type_is_alloc(reg->type) && !reg->ref_obj_id &&
-	    !reg->non_owning_ref_lock.ptr) {
+	    !reg->non_owning_ref_lock) {
 		verbose(env, "verifier internal error: ref_obj_id for allocated object must be non-zero\n");
 		return -EFAULT;
 	}
@@ -7085,13 +7084,15 @@ static int release_reference(struct bpf_verifier_env *env,
 static void invalidate_non_owning_refs(struct bpf_verifier_env *env,
 				       struct bpf_active_lock *lock)
 {
+	struct bpf_active_lock *cur_state_lock;
 	struct bpf_func_state *unused;
 	struct bpf_reg_state *reg;
 
+	cur_state_lock = &env->cur_state->active_lock;
 	bpf_for_each_reg_in_vstate(env->cur_state, unused, reg, ({
-		if (reg->non_owning_ref_lock.ptr &&
-		    reg->non_owning_ref_lock.ptr == lock->ptr &&
-		    reg->non_owning_ref_lock.id == lock->id)
+		if (reg->non_owning_ref_lock &&
+		    cur_state_lock->ptr == lock->ptr &&
+		    cur_state_lock->id == lock->id)
 			__mark_reg_unknown(env, reg);
 	}));
 }
@@ -8718,13 +8719,12 @@ static int ref_set_non_owning_lock(struct bpf_verifier_env *env, struct bpf_reg_
 		return -EFAULT;
 	}
 
-	if (reg->non_owning_ref_lock.ptr) {
+	if (reg->non_owning_ref_lock) {
 		verbose(env, "verifier internal error: non_owning_ref_lock already set\n");
 		return -EFAULT;
 	}
 
-	reg->non_owning_ref_lock.id = state->active_lock.id;
-	reg->non_owning_ref_lock.ptr = state->active_lock.ptr;
+	reg->non_owning_ref_lock = true;
 
 	return 0;
 }
-- 
2.30.2
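
For context, here is a minimal BPF-side sketch of the invariant the
commit message relies on. It is not part of this patch: it assumes the
list kfuncs plus the private()/__contains() helpers used by the BPF
selftests (bpf_experimental.h), and node_data is a made-up type for
illustration.

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct node_data {
	long key;
	struct bpf_list_node node;
};

/* Lock and list head live in the same allocation/map region, so the
 * verifier knows glock protects ghead.
 */
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_list_head ghead __contains(node_data, node);

SEC("tc")
int non_owning_ref_example(void *ctx)
{
	struct node_data *n;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 0;

	bpf_spin_lock(&glock);
	bpf_list_push_front(&ghead, &n->node);
	/* After the push, 'n' is a non-owning ref. Since only one lock
	 * may be held at a time, the only lock it can be associated
	 * with is glock - the active_lock of the current verifier
	 * state - which is why a bool suffices.
	 */
	n->key = 5;
	bpf_spin_unlock(&glock);
	/* invalidate_non_owning_refs() marked 'n' unknown at unlock;
	 * touching n->key here would be rejected by the verifier.
	 */
	return 0;
}

char _license[] SEC("license") = "GPL";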