This adds some extra noise to the tailcall_bpf2bpf4 tests that will
cause the verifier to patch insns. This then moves around the subprog
start/end insn indexes and the poke descriptor insn index to ensure
that the verifier and JIT will continue to track these correctly.

Reviewed-by: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
Signed-off-by: John Fastabend <john.fastabend@xxxxxxxxx>
---
 .../selftests/bpf/progs/tailcall_bpf2bpf4.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
index 9a1b166b7fbe..0d70de5f97e2 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -2,6 +2,13 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 
+struct {
+	__uint(type, BPF_MAP_TYPE_ARRAY);
+	__uint(max_entries, 1);
+	__uint(key_size, sizeof(__u32));
+	__uint(value_size, sizeof(__u32));
+} nop_table SEC(".maps");
+
 struct {
 	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 	__uint(max_entries, 3);
@@ -11,9 +18,19 @@ struct {
 
 static volatile int count;
 
+__noinline
+int subprog_noise(struct __sk_buff *skb)
+{
+	__u32 key = 0;
+
+	bpf_map_lookup_elem(&nop_table, &key);
+	return 0;
+}
+
 __noinline
 int subprog_tail_2(struct __sk_buff *skb)
 {
+	subprog_noise(skb);
 	bpf_tail_call_static(skb, &jmp_table, 2);
 	return skb->len * 3;
 }
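
For reference, here is a minimal sketch (not part of the patch) of the
userspace side that would give the bpf_tail_call_static() in
subprog_tail_2() a target to jump to. The object filename and the
program name "classifier_2" are assumptions for illustration; the real
selftest runner wires this up more thoroughly.

#include <bpf/libbpf.h>
#include <bpf/bpf.h>

/* Sketch only: load the object and install a prog fd into jmp_table
 * slot 2 so bpf_tail_call_static(skb, &jmp_table, 2) has a target.
 * "classifier_2" is a hypothetical program name, not from this diff.
 */
static int setup_jmp_table(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_map *map;
	int map_fd, prog_fd, key = 2;

	obj = bpf_object__open_file("tailcall_bpf2bpf4.o", NULL);
	if (libbpf_get_error(obj))
		return -1;
	if (bpf_object__load(obj))
		return -1;

	prog = bpf_object__find_program_by_name(obj, "classifier_2");
	map = bpf_object__find_map_by_name(obj, "jmp_table");
	if (!prog || !map)
		return -1;

	prog_fd = bpf_program__fd(prog);
	map_fd = bpf_map__fd(map);

	/* A PROG_ARRAY maps a u32 index to a prog fd; index 2 matches
	 * the constant passed to bpf_tail_call_static().
	 */
	return bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY);
}

The sketch stops at populating the map; the actual runner also attaches
the entry program and asserts on the resulting counters.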