In adjust_scalar_min_max_vals(), let the 32bit bounds learn from the 64bit
bounds to get tighter bounds tracking. A similar operation can be found in
reg_set_min_max().

Also, reg_bounds_sync() can now be folded into zext_32_to_64().

Before:

func#0 @0
0: R1=ctx(off=0,imm=0) R10=fp0
0: (b7) r0 = 0                        ; R0_w=0
1: (b7) r1 = 0                        ; R1_w=0
2: (87) r1 = -r1                      ; R1_w=scalar()
3: (87) r1 = -r1                      ; R1_w=scalar()
4: (c7) r1 s>>= 63                    ; R1_w=scalar(smin=-1,smax=0)
5: (07) r1 += 2                       ; R1_w=scalar(umin=1,umax=2,var_off=(0x0; 0xffffffff))    <--- [*]
6: (95) exit

It can be seen that even though the 64bit bounds are known at [*]
(umin=1, umax=2), the 32bit bounds are still unknown
(var_off=(0x0; 0xffffffff)).

After:

func#0 @0
0: R1=ctx(off=0,imm=0) R10=fp0
0: (b7) r0 = 0                        ; R0_w=0
1: (b7) r1 = 0                        ; R1_w=0
2: (87) r1 = -r1                      ; R1_w=scalar()
3: (87) r1 = -r1                      ; R1_w=scalar()
4: (c7) r1 s>>= 63                    ; R1_w=scalar(smin=-1,smax=0)
5: (07) r1 += 2                       ; R1_w=scalar(umin=1,umax=2,var_off=(0x0; 0x3))    <--- [*]
6: (95) exit

Signed-off-by: Youlin Li <liulin063@xxxxxxxxx>
---
 kernel/bpf/verifier.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0efbac0fd126..1f5c6e3634d6 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4383,6 +4383,7 @@ static void zext_32_to_64(struct bpf_reg_state *reg)
 {
 	reg->var_off = tnum_subreg(reg->var_off);
 	__reg_assign_32_into_64(reg);
+	reg_bounds_sync(reg);
 }
 
 /* truncate register to smaller size (in bytes)
@@ -8934,10 +8935,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		break;
 	}
 
-	/* ALU32 ops are zero extended into 64bit register */
-	if (alu32)
+	if (alu32) {
+		/* ALU32 ops are zero extended into 64bit register */
 		zext_32_to_64(dst_reg);
-	reg_bounds_sync(dst_reg);
+	} else {
+		__reg_combine_64_into_32(dst_reg);
+	}
 
 	return 0;
 }
@@ -9126,7 +9129,6 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 						 insn->dst_reg);
 			}
 			zext_32_to_64(dst_reg);
-			reg_bounds_sync(dst_reg);
 		}
 	} else {
 		/* case: R = imm
-- 
2.25.1
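
P.S. Not part of this patch: below is a minimal selftest sketch that could
exercise the tightened subreg bounds, written against the asm-based
test_loader style (bpf_misc.h helpers) in tools/testing/selftests/bpf. The
file name, the __description() string and the exact __msg() pattern are my
assumptions; the expected message has to match the verifier log format of
the tree this eventually lands in, so treat it as a sketch rather than a
ready-to-apply test.

/* Hypothetical file: tools/testing/selftests/bpf/progs/verifier_bounds_32_from_64.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

SEC("socket")
__description("32bit bounds learn from 64bit bounds")
__success
__log_level(2)
/* Same program as in the log above; after "r1 += 2" the subreg bounds
 * should now be tight: var_off=(0x0; 0x3) instead of (0x0; 0xffffffff).
 */
__msg("R1_w=scalar(umin=1,umax=2,var_off=(0x0; 0x3))")
__naked void bounds_32_from_64(void)
{
	asm volatile ("					\
	r0 = 0;						\
	r1 = 0;						\
	r1 = -r1;					\
	r1 = -r1;					\
	r1 s>>= 63;					\
	r1 += 2;					\
	exit;						\
"	::: __clobber_all);
}

char _license[] SEC("license") = "GPL";

If the macro-based verifier/*.c table format is preferred instead, the same
instruction sequence can be expressed with BPF_MOV64_IMM()/BPF_ALU64_IMM()
entries; I can respin with a test in either form.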