Add verifier tests that verify the new pruning behavior for STACK_DYNPTR
slots, and ensure that state equivalence takes into account changes to
the old and current verifier state correctly. Also ensure that the
stacksafe changes are actually enabling pruning in case states are
equivalent from pruning PoV.

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@xxxxxxxxx>
---
 .../testing/selftests/bpf/progs/dynptr_fail.c | 141 ++++++++++++++++++
 1 file changed, 141 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index e43000c63c66..8f7b239b8503 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -35,6 +35,13 @@ struct {
 	__type(value, __u32);
 } array_map3 SEC(".maps");
 
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__type(key, __u32);
+	__type(value, __u64);
+} array_map4 SEC(".maps");
+
 struct sample {
 	int pid;
 	long value;
@@ -653,3 +660,137 @@ int dynptr_from_mem_invalid_api(void *ctx)
 
 	return 0;
 }
+
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_pruning_overwrite(struct __sk_buff *ctx)
+{
+	asm volatile (
+		"r9 = 0xeB9F;"
+		"r6 = %[ringbuf] ll;"
+		"r1 = r6;"
+		"r2 = 8;"
+		"r3 = 0;"
+		"r4 = r10;"
+		"r4 += -16;"
+		"call %[bpf_ringbuf_reserve_dynptr];"
+		"if r0 == 0 goto pjmp1;"
+		"goto pjmp2;"
+	"pjmp1:"
+		"*(u64 *)(r10 - 16) = r9;"
+	"pjmp2:"
+		"r1 = r10;"
+		"r1 += -16;"
+		"r2 = 0;"
+		"call %[bpf_ringbuf_discard_dynptr];"
+		:
+		: __imm(bpf_ringbuf_reserve_dynptr),
+		  __imm(bpf_ringbuf_discard_dynptr),
+		  __imm_addr(ringbuf)
+		: __clobber_all
+	);
+	return 0;
+}
+
+SEC("?tc")
+__success __msg("12: safe") __log_level(2)
+int dynptr_pruning_stacksafe(struct __sk_buff *ctx)
+{
+	asm volatile (
+		"r9 = 0xeB9F;"
+		"r6 = %[ringbuf] ll;"
+		"r1 = r6;"
+		"r2 = 8;"
+		"r3 = 0;"
+		"r4 = r10;"
+		"r4 += -16;"
+		"call %[bpf_ringbuf_reserve_dynptr];"
+		"if r0 == 0 goto stjmp1;"
+		"goto stjmp2;"
+	"stjmp1:"
+		"r9 = r9;"
+	"stjmp2:"
+		"r1 = r10;"
+		"r1 += -16;"
+		"r2 = 0;"
+		"call %[bpf_ringbuf_discard_dynptr];"
+		:
+		: __imm(bpf_ringbuf_reserve_dynptr),
+		  __imm(bpf_ringbuf_discard_dynptr),
+		  __imm_addr(ringbuf)
+		: __clobber_all
+	);
+	return 0;
+}
+
+SEC("?tc")
+__failure __msg("cannot overwrite referenced dynptr") __log_level(2)
+int dynptr_pruning_type_confusion(struct __sk_buff *ctx)
+{
+	asm volatile (
+		"r6 = %[array_map4] ll;"
+		"r7 = %[ringbuf] ll;"
+		"r1 = r6;"
+		"r2 = r10;"
+		"r2 += -8;"
+		"r9 = 0;"
+		"*(u64 *)(r2 + 0) = r9;"
+		"r3 = r10;"
+		"r3 += -24;"
+		"r9 = 0xeB9FeB9F;"
+		"*(u64 *)(r10 - 16) = r9;"
+		"*(u64 *)(r10 - 24) = r9;"
+		"r9 = 0;"
+		"r4 = 0;"
+		"r8 = r2;"
+		"call %[bpf_map_update_elem];"
+		"r1 = r6;"
+		"r2 = r8;"
+		"call %[bpf_map_lookup_elem];"
+		"if r0 != 0 goto tjmp1;"
+		"exit;"
+	"tjmp1:"
+		"r8 = r0;"
+		"r1 = r7;"
+		"r2 = 8;"
+		"r3 = 0;"
+		"r4 = r10;"
+		"r4 += -16;"
+		"r0 = *(u64 *)(r0 + 0);"
+		"call %[bpf_ringbuf_reserve_dynptr];"
+		"if r0 == 0 goto tjmp2;"
+		"r8 = r8;"
+		"r8 = r8;"
+		"r8 = r8;"
+		"r8 = r8;"
+		"r8 = r8;"
+		"r8 = r8;"
+		"r8 = r8;"
+		"goto tjmp3;"
+	"tjmp2:"
+		"*(u64 *)(r10 - 8) = r9;"
+		"*(u64 *)(r10 - 16) = r9;"
+		"r1 = r8;"
+		"r1 += 8;"
+		"r2 = 0;"
+		"r3 = 0;"
+		"r4 = r10;"
+		"r4 += -16;"
+		"call %[bpf_dynptr_from_mem];"
+	"tjmp3:"
+		"r1 = r10;"
+		"r1 += -16;"
+		"r2 = 0;"
+		"call %[bpf_ringbuf_discard_dynptr];"
+		:
+		: __imm(bpf_map_update_elem),
+		  __imm(bpf_map_lookup_elem),
+		  __imm(bpf_ringbuf_reserve_dynptr),
+		  __imm(bpf_dynptr_from_mem),
+		  __imm(bpf_ringbuf_discard_dynptr),
+		  __imm_addr(array_map4),
+		  __imm_addr(ringbuf)
+		: __clobber_all
+	);
+	return 0;
+}
-- 
2.39.1
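
A note on the shape of these tests: they are written in raw BPF assembly so
that the branches and the stores on each path land on exact stack offsets
overlapping the dynptr's STACK_DYNPTR slots at the point where the verifier
attempts pruning, which a C compiler would not guarantee. Purely as a rough
sketch (not part of the diff above; the program name is made up, and the
ringbuf map declaration here only stands in for the one already present in
dynptr_fail.c), the control-flow pattern being exercised looks like this in C:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Stand-in for the ringbuf map already declared in dynptr_fail.c. */
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

SEC("?tc")
int dynptr_pruning_shape_sketch(struct __sk_buff *ctx)
{
	struct bpf_dynptr ptr;

	/* Reserve 8 bytes through a stack-allocated dynptr; the dynptr
	 * occupies two 8-byte stack slots tracked as STACK_DYNPTR.
	 */
	if (bpf_ringbuf_reserve_dynptr(&ringbuf, 8, 0, &ptr)) {
		/* One of two paths reaching the join point below; the
		 * verifier compares states here when deciding whether
		 * to prune.
		 */
	}

	/* Join point: the dynptr must be released on every path. */
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

The selftests keep the asm form so that the scalar overwrite in
dynptr_pruning_overwrite and the spills in dynptr_pruning_type_confusion hit
exactly the slots that stacksafe() compares between the old and current
states.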