To get rid of propagating the tail call counter via rax and the stack, store the
tail call counter in task_struct. The prologue of a bpf prog then initialises the
counter in task_struct, and whenever a tail call happens, the counter in
task_struct is compared against the limit and incremented.

Signed-off-by: Leon Hwang <hffilwlqm@xxxxxxxxx>
---
 include/linux/sched.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3c2abbc587b49..d0696fcabf14f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1501,6 +1501,8 @@ struct task_struct {
 	struct bpf_local_storage __rcu	*bpf_storage;
 	/* Used for BPF run context */
 	struct bpf_run_ctx		*bpf_ctx;
+	/* Used for BPF run time */
+	u32				bpf_tail_call_cnt;
 #endif
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
-- 
2.44.0
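
For illustration only (not part of the patch): a minimal C sketch of the
initialise and compare-and-increment steps the commit message describes, written
against the new task_struct field. The helper names here are hypothetical; the
actual change would emit equivalent instructions from the JIT prologue and
tail-call path rather than call such helpers.

	#include <linux/sched.h>	/* current, struct task_struct */
	#include <linux/bpf.h>		/* MAX_TAIL_CALL_CNT */

	/* Prologue-equivalent step: reset the per-task counter on prog entry. */
	static __always_inline void bpf_tail_call_cnt_init(void)
	{
		current->bpf_tail_call_cnt = 0;
	}

	/* Tail-call-equivalent step: refuse the call once the limit is hit,
	 * otherwise bump the per-task counter and allow it.
	 */
	static __always_inline bool bpf_tail_call_cnt_ok(void)
	{
		if (current->bpf_tail_call_cnt >= MAX_TAIL_CALL_CNT)
			return false;

		current->bpf_tail_call_cnt++;
		return true;
	}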