On Thu, 2024-12-19 at 21:09 -0700, Daniel Xu wrote: lgtm, but please see a note below. [...] > +/* Returns constant key value if possible, else negative error */ > +static s64 get_constant_map_key(struct bpf_verifier_env *env, > + struct bpf_reg_state *key, > + u32 key_size) > +{ > + struct bpf_func_state *state = func(env, key); > + struct bpf_reg_state *reg; > + int slot, spi, off; > + int spill_size = 0; > + int zero_size = 0; > + int stack_off; > + int i, err; > + u8 *stype; > + > + if (!env->bpf_capable) > + return -EOPNOTSUPP; > + if (key->type != PTR_TO_STACK) > + return -EOPNOTSUPP; > + if (!tnum_is_const(key->var_off)) > + return -EOPNOTSUPP; > + > + stack_off = key->off + key->var_off.value; > + slot = -stack_off - 1; > + spi = slot / BPF_REG_SIZE; > + off = slot % BPF_REG_SIZE; > + stype = state->stack[spi].slot_type; > + > + /* First handle precisely tracked STACK_ZERO */ > + for (i = off; i >= 0 && stype[i] == STACK_ZERO; i--) > + zero_size++; > + if (zero_size >= key_size) > + return 0; > + > + /* Check that stack contains a scalar spill of expected size */ > + if (!is_spilled_scalar_reg(&state->stack[spi])) > + return -EOPNOTSUPP; > + for (i = off; i >= 0 && stype[i] == STACK_SPILL; i--) > + spill_size++; > + if (spill_size != key_size) > + return -EOPNOTSUPP; > + > + reg = &state->stack[spi].spilled_ptr; > + if (!tnum_is_const(reg->var_off)) > + /* Stack value not statically known */ > + return -EOPNOTSUPP; > + > + /* We are relying on a constant value. So mark as precise > + * to prevent pruning on it. > + */ > + bt_set_frame_slot(&env->bt, env->cur_state->curframe, spi); I think env->cur_state->curframe is not always correct here. It should be key->frameno, since key may be a pointer into the stack of a caller's frame several frames up the call chain; in that case curframe would mark a slot in the wrong frame for precision tracking. 
> + err = mark_chain_precision_batch(env); > + if (err < 0) > + return err; > + > + return reg->var_off.value; > +} > + > static int check_func_arg(struct bpf_verifier_env *env, u32 arg, > struct bpf_call_arg_meta *meta, > const struct bpf_func_proto *fn, [...]