On Tue, 2022-12-13 at 16:35 -0800, Andrii Nakryiko wrote:
> On Fri, Dec 9, 2022 at 5:58 AM Eduard Zingerman <eddyz87@xxxxxxxxx> wrote:
> >
> > A test case that would erroneously pass verification if
> > verifier.c:states_equal() maintains separate register ID mappings for
> > call frames.
> >
> > Signed-off-by: Eduard Zingerman <eddyz87@xxxxxxxxx>
> > ---
>
> It's so hard to read these tests. Moving forward, let's try adding new
> verifier tests like this using __naked functions and embedded
> assembly. With recent test loader changes ([0]), there isn't much
> that's needed, except for a few simple examples to get us started and
> perhaps __flags(BPF_F_TEST_STATE_FREQ) support. The upside is that
> using maps or global variables from assembly is now possible and easy,
> and doesn't require any custom loader support at all.
>
> [0] https://patchwork.kernel.org/project/netdevbpf/list/?series=702713&state=*

This is very nice, I'll try to use it for the next patch-set.

How do you think it should look for test_verifier-style tests?
The easiest way would be to just add new BPF sources under progs/
and have some prog_tests/verifier.c like this:

  int test_verifier()
      ...
      RUN_TESTS(array_access),
      RUN_TESTS(scalar_ids)
      ...

Thus reusing the build mechanics for skeletons etc.
However, it seems to break the current logical separation between
"unit" tests in test_verifier and "functional" tests in test_progs.
But this may be ok.
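
To make the shape concrete, here is a minimal sketch of what such a
prog_tests/verifier.c could look like. The skeleton names, the progs/
file names and the RUN_TESTS() plumbing are all hypothetical, it is
only meant to illustrate the idea above:

#include <test_progs.h>

/* Skeletons generated from hypothetical progs/array_access.c and
 * progs/scalar_ids.c containing the __naked test functions.
 */
#include "array_access.skel.h"
#include "scalar_ids.skel.h"

void test_verifier(void)
{
	/* RUN_TESTS() is the hypothetical helper from the sketch above:
	 * for each program in the skeleton it would invoke the generic
	 * test loader and check the expected verdict / message
	 * annotations (__failure, __msg, ...).
	 */
	RUN_TESTS(array_access);
	RUN_TESTS(scalar_ids);
}

Each source file under progs/ would then carry the __naked functions
with the expected-failure annotations understood by the loader.
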
> >  tools/testing/selftests/bpf/verifier/calls.c | 82 ++++++++++++++++++++
> >  1 file changed, 82 insertions(+)
> >
> > diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
> > index 3193915c5ee6..bcd15b26dcee 100644
> > --- a/tools/testing/selftests/bpf/verifier/calls.c
> > +++ b/tools/testing/selftests/bpf/verifier/calls.c
> > @@ -2305,3 +2305,85 @@
> >  	.errstr = "!read_ok",
> >  	.result = REJECT,
> >  },
> > +/* Make sure that verifier.c:states_equal() considers IDs from all
> > + * frames when building 'idmap' for check_ids().
> > + */
> > +{
> > +	"calls: check_ids() across call boundary",
> > +	.insns = {
> > +	/* Function main() */
> > +	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
> > +	/* fp[-24] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
> > +	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
> > +	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
> > +	BPF_LD_MAP_FD(BPF_REG_1,
> > +		      0),
> > +	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
> > +	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -24),
> > +	/* fp[-32] = map_lookup_elem(...) ; get a MAP_VALUE_PTR_OR_NULL with some ID */
> > +	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
> > +	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
> > +	BPF_LD_MAP_FD(BPF_REG_1,
> > +		      0),
> > +	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
> > +	BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -32),
> > +	/* call foo(&fp[-24], &fp[-32]) ; both arguments have IDs in the current
> > +	 *                              ; stack frame
> > +	 */
> > +	BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
> > +	BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -24),
> > +	BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP),
> > +	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -32),
> > +	BPF_CALL_REL(2),
> > +	/* exit 0 */
> > +	BPF_MOV64_IMM(BPF_REG_0, 0),
> > +	BPF_EXIT_INSN(),
> > +	/* Function foo()
> > +	 *
> > +	 * r9 = &frame[0].fp[-24] ; save arguments in the callee saved registers,
> > +	 * r8 = &frame[0].fp[-32] ; arguments are pointers to pointers to map value
> > +	 */
> > +	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
> > +	BPF_MOV64_REG(BPF_REG_8, BPF_REG_2),
> > +	/* r7 = ktime_get_ns() */
> > +	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
> > +	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
> > +	/* r6 = ktime_get_ns() */
> > +	BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
> > +	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
> > +	/* if r6 > r7 goto +1 ; no new information about the state is derived from
> > +	 *                    ; this check, thus produced verifier states differ
> > +	 *                    ; only in 'insn_idx'
> > +	 * r9 = r8
> > +	 */
> > +	BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
> > +	BPF_MOV64_REG(BPF_REG_9, BPF_REG_8),
> > +	/* r9 = *r9 ; verifier gets to this point via two paths:
> > +	 *          ; (I) one including r9 = r8, verified first;
> > +	 *          ; (II) one excluding r9 = r8, verified next.
> > +	 *          ; After load of *r9 to r9 the frame[0].fp[-24].id == r9.id.
> > +	 *          ; Suppose that checkpoint is created here via path (I).
> > +	 *          ; When verifying via (II) the r9.id must be compared against
> > +	 *          ; frame[0].fp[-24].id, otherwise (I) and (II) would be
> > +	 *          ; incorrectly deemed equivalent.
> > +	 * if r9 == 0 goto <exit>
> > +	 */
> > +	BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_9, 0),
> > +	BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1),
> > +	/* r8 = *r8 ; read map value via r8, this is not safe
> > +	 * r0 = *r8 ; because r8 might not be equal to r9.
> > +	 */
> > +	BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_8, 0),
> > +	BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0),
> > +	/* exit 0 */
> > +	BPF_MOV64_IMM(BPF_REG_0, 0),
> > +	BPF_EXIT_INSN(),
> > +	},
> > +	.flags = BPF_F_TEST_STATE_FREQ,
> > +	.fixup_map_hash_8b = { 3, 9 },
> > +	.result = REJECT,
> > +	.errstr = "R8 invalid mem access 'map_value_or_null'",
> > +	.result_unpriv = REJECT,
> > +	.errstr_unpriv = "",
> > +	.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
> > +},
> > --
> > 2.34.1
> >
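
Since these array-of-insns tests are admittedly hard to follow, here is
a rough C-level rendition of foo() from the patch above, purely as an
illustration: it is not part of the patch, the parameter names are made
up, and the ktime comparison only stands in for a branch the verifier
cannot predict.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* 'a' stands for &frame[0].fp[-24] and 'b' for &frame[0].fp[-32],
 * i.e. pointers to two independently obtained map_value_or_null
 * pointers stored in the caller's stack frame.
 */
static __noinline long foo(long **a, long **b)
{
	long **q = a;		/* r9 = &frame[0].fp[-24]            */
	long *p;

	if (bpf_ktime_get_ns() <= bpf_ktime_get_ns())
		q = b;		/* r9 = r8, taken on one path only   */
	p = *q;			/* r9 = *r9                          */
	if (!p)			/* if r9 == 0 goto <exit>            */
		return 0;
	return **b;		/* r8 = *r8; r0 = *r8 -- unsafe: on  */
				/* the path where q == a, *b was     */
				/* never null checked                */
}

The null check only proves something about the pointer actually loaded
through q; on the path where q == a, *b is still map_value_or_null and
must not be dereferenced, which is what the expected
"R8 invalid mem access 'map_value_or_null'" error catches.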