On Fri, Jun 9, 2023 at 2:01 PM Eduard Zingerman <eddyz87@xxxxxxxxx> wrote:
>
> Change mark_chain_precision() to track precision in situations
> like below:
>
>     r2 = unknown value
>     ...
>   --- state #0 ---
>     ...
>     r1 = r2                 // r1 and r2 now share the same ID
>     ...
>   --- state #1 {r1.id = A, r2.id = A} ---
>     ...
>     if (r2 > 10) goto exit; // find_equal_scalars() assigns range to r1
>     ...
>   --- state #2 {r1.id = A, r2.id = A} ---
>     r3 = r10
>     r3 += r1                // need to mark both r1 and r2
>
> At the beginning of the processing of each state, ensure that if a
> register with a scalar ID is marked as precise, all registers sharing
> this ID are also marked as precise.
>
> This property would be used by a follow-up change in regsafe().
>
> Signed-off-by: Eduard Zingerman <eddyz87@xxxxxxxxx>
> ---
>  include/linux/bpf_verifier.h                  |  10 +-
>  kernel/bpf/verifier.c                         | 115 ++++++++++++++++++
>  .../testing/selftests/bpf/verifier/precise.c  |   8 +-
>  3 files changed, 128 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
> index 5fe589e11ac8..73a98f6240fd 100644
> --- a/include/linux/bpf_verifier.h
> +++ b/include/linux/bpf_verifier.h
> @@ -559,6 +559,11 @@ struct backtrack_state {
>  	u64 stack_masks[MAX_CALL_FRAMES];
>  };
>
> +struct bpf_idset {
> +	u32 count;
> +	u32 ids[BPF_ID_MAP_SIZE];
> +};
> +
>  /* single container for all structs
>   * one verifier_env per bpf_check() call
>   */
> @@ -590,7 +595,10 @@ struct bpf_verifier_env {
>  	const struct bpf_line_info *prev_linfo;
>  	struct bpf_verifier_log log;
>  	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
> -	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
> +	union {
> +		struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
> +		struct bpf_idset idset_scratch;
> +	};
>  	struct {
>  		int *insn_state;
>  		int *insn_stack;
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index ed79a93398f8..f719de396c61 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -3787,6 +3787,96 @@ static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_
>  	}
>  }
>
> +static bool idset_contains(struct bpf_idset *s, u32 id)
> +{
> +	u32 i;
> +
> +	for (i = 0; i < s->count; ++i)
> +		if (s->ids[i] == id)
> +			return true;
> +
> +	return false;
> +}
> +
> +static int idset_push(struct bpf_idset *s, u32 id)
> +{
> +	if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids)))
> +		return -1;
> +	s->ids[s->count++] = id;
> +	return 0;
> +}
> +
> +static void idset_reset(struct bpf_idset *s)
> +{
> +	s->count = 0;
> +}
> +
> +/* Collect a set of IDs for all registers currently marked as precise in env->bt.
> + * Mark all registers with these IDs as precise.
> + */
> +static int mark_precise_scalar_ids(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
> +{
> +	struct bpf_idset *precise_ids = &env->idset_scratch;
> +	struct backtrack_state *bt = &env->bt;
> +	struct bpf_func_state *func;
> +	struct bpf_reg_state *reg;
> +	DECLARE_BITMAP(mask, 64);
> +	int i, fr;
> +
> +	idset_reset(precise_ids);
> +
> +	for (fr = bt->frame; fr >= 0; fr--) {
> +		func = st->frame[fr];
> +
> +		bitmap_from_u64(mask, bt_frame_reg_mask(bt, fr));
> +		for_each_set_bit(i, mask, 32) {
> +			reg = &func->regs[i];
> +			if (!reg->id || reg->type != SCALAR_VALUE)
> +				continue;
> +			if (idset_push(precise_ids, reg->id))
> +				return -1;
> +		}
> +
> +		bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
> +		for_each_set_bit(i, mask, 64) {
> +			if (i >= func->allocated_stack / BPF_REG_SIZE)
> +				break;
> +			if (!is_spilled_scalar_reg(&func->stack[i]))
> +				continue;
> +			reg = &func->stack[i].spilled_ptr;
> +			if (!reg->id)
> +				continue;
> +			if (idset_push(precise_ids, reg->id))
> +				return -1;

Please return -EFAULT here; -1 is -EPERM, which is super confusing if it
ever bubbles up. Same for the idset_push() return code, please use more
appropriate error code constants.

Other than that, it looks good. Please add my ack once you fix the error
values:

Acked-by: Andrii Nakryiko <andrii@xxxxxxxxxx>

> +		}
> +	}
> +
> +	for (fr = 0; fr <= st->curframe; ++fr) {
> +		func = st->frame[fr];
> +
> +		for (i = BPF_REG_0; i < BPF_REG_10; ++i) {
> +			reg = &func->regs[i];
> +			if (!reg->id)
> +				continue;
> +			if (!idset_contains(precise_ids, reg->id))
> +				continue;
> +			bt_set_frame_reg(bt, fr, i);
> +		}
> +		for (i = 0; i < func->allocated_stack / BPF_REG_SIZE; ++i) {
> +			if (!is_spilled_scalar_reg(&func->stack[i]))
> +				continue;
> +			reg = &func->stack[i].spilled_ptr;
> +			if (!reg->id)
> +				continue;
> +			if (!idset_contains(precise_ids, reg->id))
> +				continue;
> +			bt_set_frame_slot(bt, fr, i);
> +		}
> +	}
> +
> +	return 0;
> +}
> +

[...]
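
P.S. To illustrate the error code point above, here's a rough sketch of
what I have in mind (untested, not compiled; the exact constant is up to
you, -EFAULT just matches how other "verifier internal error" paths report
bugs that should never happen):

static int idset_push(struct bpf_idset *s, u32 id)
{
	/* running out of slots is a verifier bug, report it as an internal error */
	if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids)))
		return -EFAULT;
	s->ids[s->count++] = id;
	return 0;
}

and then in mark_precise_scalar_ids() the call sites can either propagate
whatever idset_push() returns (which would need an extra "int err" local):

	err = idset_push(precise_ids, reg->id);
	if (err)
		return err;

or simply return -EFAULT instead of -1 at both spots.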