On Fri, 2023-10-27 at 11:13 -0700, Andrii Nakryiko wrote:
> > When performing a 32-bit conditional operation on the lower 32 bits
> > of a full 64-bit register, the register's full value isn't changed.
> > We just potentially gain new knowledge about that register's lower
> > 32 bits.
> >
> > Unfortunately, the __reg_combine_{32,64}_into_{64,32} logic that
> > reg_set_min_max() performs as a last step can lose information in
> > some cases due to __mark_reg64_unbounded() and
> > __reg_assign_32_into_64(). That's bad and completely unnecessary.
> > Especially __reg_assign_32_into_64() looks completely out of place
> > here, because we are not performing a zero-extending subregister
> > assignment during a conditional jump.
> >
> > So this patch replaces __reg_combine_* with just a normal
> > reg_bounds_sync(), which will do a proper job of deriving u64/s64
> > bounds from u32/s32 bounds, and vice versa (among all other
> > combinations).
> >
> > __reg_combine_64_into_32() is also used in one more place,
> > coerce_reg_to_size(), while handling 1- and 2-byte register loads.
> > Looking into this, it seems like besides marking the subregister as
> > unbounded before performing reg_bounds_sync(), we were also
> > performing deduction of smin32/smax32 and umin32/umax32 bounds from
> > the respective smin/smax and umin/umax bounds. This is now
> > redundant, as reg_bounds_sync() performs all the same logic more
> > generically (e.g., without the unnecessary assumption that the upper
> > 32 bits of the full register should be zero).
> >
> > Long story short, we remove __reg_combine_64_into_32() completely,
> > and coerce_reg_to_size() now only resets the subreg to unbounded and
> > then performs reg_bounds_sync() to recover as much information as
> > possible from the 64-bit umin/umax and smin/smax bounds, set
> > explicitly in coerce_reg_to_size() earlier.
> >
> > Signed-off-by: Andrii Nakryiko <andrii@xxxxxxxxxx>

Acked-by: Eduard Zingerman <eddyz87@xxxxxxxxx>

> > ---
> >  kernel/bpf/verifier.c | 60 ++++++------------------------------------
> >  1 file changed, 8 insertions(+), 52 deletions(-)
> >
> > diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> > index 6b0736c04ebe..f5fcb7fb2c67 100644
> > --- a/kernel/bpf/verifier.c
> > +++ b/kernel/bpf/verifier.c
> > @@ -2641,51 +2641,6 @@ static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
> >          }
> >  }
> >
> > -static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
> > -{
> > -        /* special case when 64-bit register has upper 32-bit register
> > -         * zeroed. Typically happens after zext or <<32, >>32 sequence
> > -         * allowing us to use 32-bit bounds directly,
> > -         */
> > -        if (tnum_equals_const(tnum_clear_subreg(reg->var_off), 0)) {
> > -                __reg_assign_32_into_64(reg);
> > -        } else {
> > -                /* Otherwise the best we can do is push lower 32bit known and
> > -                 * unknown bits into register (var_off set from jmp logic)
> > -                 * then learn as much as possible from the 64-bit tnum
> > -                 * known and unknown bits. The previous smin/smax bounds are
> > -                 * invalid here because of jmp32 compare so mark them unknown
> > -                 * so they do not impact tnum bounds calculation.
> > -                 */
> > -                __mark_reg64_unbounded(reg);
> > -        }
> > -        reg_bounds_sync(reg);
> > -}
> > -
> > -static bool __reg64_bound_s32(s64 a)
> > -{
> > -        return a >= S32_MIN && a <= S32_MAX;
> > -}
> > -
> > -static bool __reg64_bound_u32(u64 a)
> > -{
> > -        return a >= U32_MIN && a <= U32_MAX;
> > -}
> > -
> > -static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
> > -{
> > -        __mark_reg32_unbounded(reg);
> > -        if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
> > -                reg->s32_min_value = (s32)reg->smin_value;
> > -                reg->s32_max_value = (s32)reg->smax_value;
> > -        }
> > -        if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
> > -                reg->u32_min_value = (u32)reg->umin_value;
> > -                reg->u32_max_value = (u32)reg->umax_value;
> > -        }
> > -        reg_bounds_sync(reg);
> > -}
> > -
> >  /* Mark a register as having a completely unknown (scalar) value. */
> >  static void __mark_reg_unknown(const struct bpf_verifier_env *env,
> >                                 struct bpf_reg_state *reg)
> > @@ -6382,9 +6337,10 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
> >           * values are also truncated so we push 64-bit bounds into
> >           * 32-bit bounds. Above were truncated < 32-bits already.
> >           */
> > -        if (size >= 4)
> > -                return;
> > -        __reg_combine_64_into_32(reg);
> > +        if (size < 4) {
> > +                __mark_reg32_unbounded(reg);
> > +                reg_bounds_sync(reg);
> > +        }
> >  }
> >
> >  static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
> > @@ -14623,13 +14579,13 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
> >                                              tnum_subreg(false_32off));
> >                  true_reg->var_off = tnum_or(tnum_clear_subreg(true_64off),
> >                                              tnum_subreg(true_32off));
> > -                __reg_combine_32_into_64(false_reg);
> > -                __reg_combine_32_into_64(true_reg);
> > +                reg_bounds_sync(false_reg);
> > +                reg_bounds_sync(true_reg);
> >          } else {
> >                  false_reg->var_off = false_64off;
> >                  true_reg->var_off = true_64off;
> > -                __reg_combine_64_into_32(false_reg);
> > -                __reg_combine_64_into_32(true_reg);
> > +                reg_bounds_sync(false_reg);
> > +                reg_bounds_sync(true_reg);
> >          }
> >  }
> >
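As an aside, the information loss the commit message describes can be made
concrete with a small standalone model. This is only a sketch with made-up
names (struct model_reg, old_combine_32_into_64(), sync_bounds()), not the
kernel's code, and it ignores var_off/tnum tracking entirely. It contrasts
wiping the 64-bit bounds on a jmp32, as the removed
__reg_combine_32_into_64() did via __mark_reg64_unbounded(), with keeping
them and letting the subreg bounds refine them, which is the kind of
derivation reg_bounds_sync() is able to do:

/* Standalone model of bounds handling after a 32-bit conditional jump.
 * The register below is known to lie in [0x100000000, 0x1000000ff];
 * the jmp32 then teaches us that its lower 32 bits are <= 0x7f.
 */
#include <stdint.h>
#include <stdio.h>

struct model_reg {
        uint64_t umin, umax;       /* 64-bit unsigned bounds */
        uint32_t u32_min, u32_max; /* lower-32-bit unsigned bounds */
};

/* Old-style combine: upper 32 bits of var_off aren't known to be zero,
 * so throw the 64-bit bounds away and keep only subreg knowledge. */
static void old_combine_32_into_64(struct model_reg *r)
{
        r->umin = 0;
        r->umax = UINT64_MAX;
}

/* Sync-style: when umin and umax share the same upper 32 bits, every
 * value in the range has those upper bits, so the subreg bounds can
 * safely tighten the 64-bit bounds. */
static void sync_bounds(struct model_reg *r)
{
        if ((r->umin >> 32) == (r->umax >> 32)) {
                uint64_t hi = r->umin & 0xffffffff00000000ULL;

                if (hi + r->u32_min > r->umin)
                        r->umin = hi + r->u32_min;
                if (hi + r->u32_max < r->umax)
                        r->umax = hi + r->u32_max;
        }
}

int main(void)
{
        struct model_reg a = { 0x100000000ULL, 0x1000000ffULL, 0, 0x7f };
        struct model_reg b = a;

        old_combine_32_into_64(&a);
        sync_bounds(&b);
        printf("old:  [%#llx, %#llx]\n",
               (unsigned long long)a.umin, (unsigned long long)a.umax);
        printf("sync: [%#llx, %#llx]\n",
               (unsigned long long)b.umin, (unsigned long long)b.umax);
        return 0;
}

Run, this prints old: [0, 0xffffffffffffffff] versus
sync: [0x100000000, 0x10000007f]: the old path forgets everything the
64-bit bounds knew, while the sync path keeps them and even tightens umax
using the new subreg knowledge.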
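The coerce_reg_to_size() side can be sketched the same way, reusing the
same toy struct. Again, hypothetical code rather than the kernel's: after
a 1- or 2-byte load the 64-bit bounds have already been set to the
truncated range, so marking the subreg unbounded and resyncing trivially
recovers the u32 bounds, which is why the explicit smin32/umin32
deduction in the removed __reg_combine_64_into_32() was redundant:

/* Standalone sketch of the 64-bit -> 32-bit direction for a 2-byte
 * load: the full range already fits in 32 bits, so the subreg bounds
 * are just the 64-bit bounds truncated. */
#include <stdint.h>
#include <stdio.h>

struct model_reg {
        uint64_t umin, umax;
        uint32_t u32_min, u32_max;
};

static void mark_reg32_unbounded_and_sync(struct model_reg *r)
{
        /* reset subreg to unbounded ... */
        r->u32_min = 0;
        r->u32_max = UINT32_MAX;
        /* ... then recover it from the 64-bit bounds when they fit */
        if (r->umax <= UINT32_MAX) {
                r->u32_min = (uint32_t)r->umin;
                r->u32_max = (uint32_t)r->umax;
        }
}

int main(void)
{
        /* A 2-byte load: coerce_reg_to_size() has set [0, 0xffff]. */
        struct model_reg r = { .umin = 0, .umax = 0xffff,
                               .u32_min = 0, .u32_max = UINT32_MAX };

        mark_reg32_unbounded_and_sync(&r);
        printf("u32 bounds: [%u, %#x]\n", r.u32_min, r.u32_max);
        return 0;
}

This prints u32 bounds: [0, 0xffff]: nothing beyond the generic
64-to-32 carry-over is needed, and no special assumption about zero
upper bits is involved, because umax itself already fits in 32 bits.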