Add a few test cases to verify precision tracking for scalars gaining
range because of sync_linked_regs():
- check what happens when more than 6 registers might gain range in
  sync_linked_regs();
- check if precision is propagated correctly when operand of
  conditional jump gained range in sync_linked_regs() and one of
  linked registers is marked precise;
- check if precision is propagated correctly when operand of
  conditional jump gained range in sync_linked_regs() and another
  linked operand of the conditional jump is marked precise;
- add a minimized reproducer for precision tracking bug reported in [0];
- check that mark_chain_precision() for one of the conditional jump
  operands does not trigger equal scalars precision propagation.

[0] https://lore.kernel.org/bpf/CAEf4BzZ0xidVCqB47XnkXcNhkPWF6_nTV7yt+_Lf0kcFEut2Mg@xxxxxxxxxxxxxx/

Signed-off-by: Eduard Zingerman <eddyz87@xxxxxxxxx>
---
 .../selftests/bpf/progs/verifier_scalar_ids.c | 165 ++++++++++++++++++
 1 file changed, 165 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
index b642d5c24154..2ecf77b623e0 100644
--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
@@ -47,6 +47,72 @@ __naked void linked_regs_bpf_k(void)
 	: __clobber_all);
 }
 
+/* Registers r{0,1,2} share same ID when 'if r1 > ...' insn is processed,
+ * check that verifier marks r{1,2} as precise while backtracking
+ * 'if r1 > ...' with r0 already marked.
+ */
+SEC("socket")
+__success __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("frame0: regs=r0 stack= before 5: (2d) if r1 > r3 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
+__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
+__naked void linked_regs_bpf_x_src(void)
+{
+	asm volatile (
+	/* r0 = random number up to 0xff */
+	"call %[bpf_ktime_get_ns];"
+	"r0 &= 0xff;"
+	/* tie r0.id == r1.id == r2.id */
+	"r1 = r0;"
+	"r2 = r0;"
+	"r3 = 7;"
+	"if r1 > r3 goto +0;"
+	/* force r0 to be precise, this eventually marks r1 and r2 as
+	 * precise as well because of shared IDs
+	 */
+	"r4 = r10;"
+	"r4 += r0;"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Registers r{0,1,2} share same ID when 'if r1 > r3' insn is processed,
+ * check that verifier marks r{0,1,2} as precise while backtracking
+ * 'if r1 > r3' with r3 already marked.
+ */
+SEC("socket")
+__success __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("frame0: regs=r3 stack= before 5: (2d) if r1 > r3 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2,r3 stack=:")
+__msg("frame0: regs=r0,r1,r2,r3 stack= before 4: (b7) r3 = 7")
+__naked void linked_regs_bpf_x_dst(void)
+{
+	asm volatile (
+	/* r0 = random number up to 0xff */
+	"call %[bpf_ktime_get_ns];"
+	"r0 &= 0xff;"
+	/* tie r0.id == r1.id == r2.id */
+	"r1 = r0;"
+	"r2 = r0;"
+	"r3 = 7;"
+	"if r1 > r3 goto +0;"
+	/* force r3 to be precise, this eventually marks r0, r1 and r2
+	 * as precise as well because of shared IDs
+	 */
+	"r4 = r10;"
+	"r4 += r3;"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
 /* Same as linked_regs_bpf_k, but break one of the
  * links, note that r1 is absent from regs=... in __msg below.
  */
@@ -280,6 +346,105 @@ __naked void precision_two_ids(void)
 	: __clobber_all);
 }
 
+SEC("socket")
+__success __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+/* check that r0 and r6 have different IDs after 'if',
+ * collect_linked_regs() can't tie more than 6 registers for a single insn.
+ */
+__msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
+__msg("9: (bf) r6 = r6 ; R6_w=scalar(id=2")
+/* check that r{0-5} are marked precise after 'if' */
+__msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
+__msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
+__naked void linked_regs_too_many_regs(void)
+{
+	asm volatile (
+	/* r0 = random number up to 0xff */
+	"call %[bpf_ktime_get_ns];"
+	"r0 &= 0xff;"
+	/* tie r{0-6} IDs */
+	"r1 = r0;"
+	"r2 = r0;"
+	"r3 = r0;"
+	"r4 = r0;"
+	"r5 = r0;"
+	"r6 = r0;"
+	/* propagate range for r{0-6} */
+	"if r0 > 7 goto +0;"
+	/* make r6 appear in the log */
+	"r6 = r6;"
+	/* force r0 to be precise,
+	 * this would cause r{0-5} to be precise because of shared IDs
+	 */
+	"r7 = r10;"
+	"r7 += r0;"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+SEC("socket")
+__failure __log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+__msg("regs=r7 stack= before 5: (3d) if r8 >= r0")
+__msg("parent state regs=r0,r7,r8")
+__msg("regs=r0,r7,r8 stack= before 4: (25) if r0 > 0x1")
+__msg("div by zero")
+__naked void linked_regs_broken_link_2(void)
+{
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"r7 = r0;"
+	"r8 = r0;"
+	"call %[bpf_get_prandom_u32];"
+	"if r0 > 1 goto +0;"
+	/* r7.id == r8.id,
+	 * thus r7 precision implies r8 precision,
+	 * which implies r0 precision because of the conditional below.
+	 */
+	"if r8 >= r0 goto 1f;"
+	/* break id relation between r7 and r8 */
+	"r8 += r8;"
+	/* make r7 precise */
+	"if r7 == 0 goto 1f;"
+	"r0 /= 0;"
+"1:"
+	"r0 = 42;"
+	"exit;"
+	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+/* Check that mark_chain_precision() for one of the conditional jump
+ * operands does not trigger equal scalars precision propagation.
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("3: (25) if r1 > 0x100 goto pc+0")
+__msg("frame0: regs=r1 stack= before 2: (bf) r1 = r0")
+__naked void cjmp_no_linked_regs_trigger(void)
+{
+	asm volatile (
+	/* r0 = random number up to 0xff */
+	"call %[bpf_ktime_get_ns];"
+	"r0 &= 0xff;"
+	/* tie r0.id == r1.id */
+	"r1 = r0;"
+	/* the jump below would be predicted, thus r1 would be marked
+	 * precise, this should not imply precision mark for r0
+	 */
+	"if r1 > 256 goto +0;"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
 /* Verify that check_ids() is used by regsafe() for scalars.
  *
  * r9 = ... some pointer with range X ...
-- 
2.45.2