Adjust the timer/timer_ret_1 test to more carefully validate the
verifier's logic for enforcing async callback return values. This test
will pass only if the return result is marked precise and read.

Signed-off-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
---
 .../selftests/bpf/progs/timer_failure.c      | 29 ++++++++++++++-----
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/tools/testing/selftests/bpf/progs/timer_failure.c b/tools/testing/selftests/bpf/progs/timer_failure.c
index 9000da1e2120..a8dab6697810 100644
--- a/tools/testing/selftests/bpf/progs/timer_failure.c
+++ b/tools/testing/selftests/bpf/progs/timer_failure.c
@@ -21,17 +21,32 @@ struct {
 	__type(value, struct elem);
 } timer_map SEC(".maps");
 
-static int timer_cb_ret1(void *map, int *key, struct bpf_timer *timer)
+__naked __noinline __used
+static unsigned long timer_cb_ret_bad()
 {
-	if (bpf_get_smp_processor_id() % 2)
-		return 1;
-	else
-		return 0;
+	asm volatile (
+		"call %[bpf_get_prandom_u32];"
+		"if r0 > 1000 goto 1f;"
+		"r0 = 0;"
+		"1:"
+		"goto +0;" /* checkpoint */
+		/* async callback is expected to return 0, so branch above
+		 * skipping r0 = 0; should lead to a failure, but if exit
+		 * instruction doesn't enforce r0's precision, this callback
+		 * will be successfully verified
+		 */
+		"exit;"
+		:
+		: __imm(bpf_get_prandom_u32)
+		: __clobber_common
+	);
 }
 
 SEC("fentry/bpf_fentry_test1")
 __failure __msg("should have been in [0, 0]")
-int BPF_PROG2(test_ret_1, int, a)
+__log_level(2)
+__flag(BPF_F_TEST_STATE_FREQ)
+int BPF_PROG2(test_bad_ret, int, a)
 {
 	int key = 0;
 	struct bpf_timer *timer;
@@ -39,7 +54,7 @@ int BPF_PROG2(test_ret_1, int, a)
 	timer = bpf_map_lookup_elem(&timer_map, &key);
 	if (timer) {
 		bpf_timer_init(timer, &timer_map, CLOCK_BOOTTIME);
-		bpf_timer_set_callback(timer, timer_cb_ret1);
+		bpf_timer_set_callback(timer, timer_cb_ret_bad);
 		bpf_timer_start(timer, 1000, 0);
 	}
 
-- 
2.34.1
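
For readers less familiar with BPF inline assembly, a rough C equivalent
of the timer_cb_ret_bad() callback above might look like the sketch below.
This is illustrative only and not part of the patch; the name
timer_cb_ret_bad_c is hypothetical. The patch uses the asm form precisely
so that the branch skipping "r0 = 0" and the final exit instruction reach
the verifier as written, without the compiler folding them away:

static unsigned long timer_cb_ret_bad_c(void *map, int *key,
					struct bpf_timer *timer)
{
	unsigned long ret = bpf_get_prandom_u32();

	/* mirrors "if r0 > 1000 goto 1f;": when the random value is
	 * large, the assignment below is skipped and ret stays nonzero
	 */
	if (ret <= 1000)
		ret = 0;

	/* async (timer) callbacks must return a value in [0, 0]; on the
	 * skipped path this returns nonzero, which the verifier should
	 * reject -- but only if it enforces r0's precision at exit
	 */
	return ret;
}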