On Sun, Sep 03, 2023 at 11:14:46PM +0800, Leon Hwang wrote:
> Since commit ebf7d1f508a73871 ("bpf, x64: rework pro/epilogue and tailcall
> handling in JIT"), the tailcall on x64 has worked better than before.
>
> Since commit e411901c0b775a3a ("bpf: allow for tailcalls in BPF subprograms
> for x64 JIT"), tailcalls have been able to run in BPF subprograms on x64.
>
> Since commit 5b92a28aae4dd0f8 ("bpf: Support attaching tracing BPF program
> to other BPF programs"), a BPF program has been able to trace other BPF
> programs.
>
> How about combining them all together?
>
> 1. FENTRY/FEXIT on a BPF subprogram.
> 2. A tailcall runs in the BPF subprogram.
> 3. The tailcall calls the subprogram's caller.
>
> As a result, a tailcall infinite loop comes up, and the loop halts the
> machine.
>
> As we know, in a tail call context, the tail_call_cnt propagates between
> BPF subprograms via the stack and the rax register. The same must happen
> in trampolines.
>
> Fixes: ebf7d1f508a7 ("bpf, x64: rework pro/epilogue and tailcall handling in JIT")
> Fixes: e411901c0b77 ("bpf: allow for tailcalls in BPF subprograms for x64 JIT")
> Signed-off-by: Leon Hwang <hffilwlqm@xxxxxxxxx>

Reviewed-by: Maciej Fijalkowski <maciej.fijalkowski@xxxxxxxxx>
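For readers who want to see the loop concretely: the three steps above boil
down to a skeleton like the one below. This is a minimal sketch, not the
selftest from this series; the file, map, and function names
(tailcall_loop.bpf.c, fexit_probe.bpf.c, jmp_table, entry, subprog_tail,
fexit_subprog) are illustrative, and userspace is assumed to point
jmp_table[0] back at entry()'s prog fd.

/* tailcall_loop.bpf.c -- hypothetical target object */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
        __uint(max_entries, 1);
        __uint(key_size, sizeof(__u32));
        __uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __noinline int subprog_tail(struct __sk_buff *skb)
{
        /* Steps 2 + 3: tail call back into the subprogram's caller,
         * entry(), assuming userspace set jmp_table[0] = entry.
         */
        bpf_tail_call_static(skb, &jmp_table, 0);
        return 0;
}

SEC("tc")
int entry(struct __sk_buff *skb)
{
        return subprog_tail(skb);
}

char LICENSE[] SEC("license") = "GPL";

/* fexit_probe.bpf.c -- hypothetical tracing object (same includes),
 * attached to subprog_tail above: step 1.
 */
SEC("fexit/subprog_tail")
int BPF_PROG(fexit_subprog, struct __sk_buff *skb, int ret)
{
        return 0;
}

With the fexit trampoline in place, every call from entry() to
subprog_tail() detours through the trampoline. Before this fix, the
trampoline did not carry the incoming rax (which holds tail_call_cnt
between subprograms) over to the original function, so the count checked by
the tailcall is effectively reset on each iteration and never reaches
MAX_TAIL_CALL_CNT, hence the infinite loop.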
> ---
>  arch/x86/net/bpf_jit_comp.c | 28 ++++++++++++++++++++++------
>  include/linux/bpf.h         |  5 +++++
>  kernel/bpf/trampoline.c     |  4 ++--
>  kernel/bpf/verifier.c       |  3 +++
>  4 files changed, 32 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index bcca1c9b9a027..2846c21d75bfa 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -1022,6 +1022,10 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
>  
>  #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
>  
> +/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
> +#define RESTORE_TAIL_CALL_CNT(stack)                            \
> +        EMIT3_off32(0x48, 0x8B, 0x85, -round_up(stack, 8) - 8)
> +
>  static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
>                    int oldproglen, struct jit_context *ctx, bool jmp_padding)
>  {
> @@ -1627,9 +1631,7 @@ st:                     if (is_imm8(insn->off))
>  
>                          func = (u8 *) __bpf_call_base + imm32;
>                          if (tail_call_reachable) {
> -                                /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
> -                                EMIT3_off32(0x48, 0x8B, 0x85,
> -                                            -round_up(bpf_prog->aux->stack_depth, 8) - 8);
> +                                RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
>                                  if (!imm32)
>                                          return -EINVAL;
>                                  offs = 7 + x86_call_depth_emit_accounting(&prog, func);
> @@ -2404,6 +2406,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>           *                     [ ...        ]
>           *                     [ stack_arg2 ]
>           * RBP - arg_stack_off [ stack_arg1 ]
> +         * RSP                 [ tail_call_cnt ] BPF_TRAMP_F_TAIL_CALL_CTX
>           */
>  
>          /* room for return value of orig_call or fentry prog */
> @@ -2468,6 +2471,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>          else
>                  /* sub rsp, stack_size */
>                  EMIT4(0x48, 0x83, 0xEC, stack_size);
> +        if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
> +                EMIT1(0x50);            /* push rax */
>          /* mov QWORD PTR [rbp - rbx_off], rbx */
>          emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_6, -rbx_off);
>  
> @@ -2520,9 +2525,15 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>                  restore_regs(m, &prog, regs_off);
>                  save_args(m, &prog, arg_stack_off, true);
>  
> +                if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
> +                        /* Before calling the original function, restore the
> +                         * tail_call_cnt from stack to rax.
> +                         */
> +                        RESTORE_TAIL_CALL_CNT(stack_size);
> +
>                  if (flags & BPF_TRAMP_F_ORIG_STACK) {
> -                        emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
> -                        EMIT2(0xff, 0xd0); /* call *rax */
> +                        emit_ldx(&prog, BPF_DW, BPF_REG_6, BPF_REG_FP, 8);
> +                        EMIT2(0xff, 0xd3); /* call *rbx */
>                  } else {
>                          /* call original function */
>                          if (emit_rsb_call(&prog, orig_call, prog)) {
> @@ -2573,7 +2584,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
>                          ret = -EINVAL;
>                          goto cleanup;
>                  }
> -        }
> +        } else if (flags & BPF_TRAMP_F_TAIL_CALL_CTX)
> +                /* Before running the original function, restore the
> +                 * tail_call_cnt from stack to rax.
> +                 */
> +                RESTORE_TAIL_CALL_CNT(stack_size);
> +
>          /* restore return value of orig_call or fentry prog back into RAX */
>          if (save_ret)
>                  emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
> diff --git a/include/linux/bpf.h b/include/linux/bpf.h
> index cfabbcf47bdb8..c8df257ea435d 100644
> --- a/include/linux/bpf.h
> +++ b/include/linux/bpf.h
> @@ -1028,6 +1028,11 @@ struct btf_func_model {
>   */
>  #define BPF_TRAMP_F_SHARE_IPMODIFY      BIT(6)
>  
> +/* Indicate that current trampoline is in a tail call context. Then, it has to
> + * cache and restore tail_call_cnt to avoid infinite tail call loop.
> + */
> +#define BPF_TRAMP_F_TAIL_CALL_CTX       BIT(7)
> +
>  /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
>   * bytes on x86.
>   */
> diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
> index 78acf28d48732..16ab5da7161f2 100644
> --- a/kernel/bpf/trampoline.c
> +++ b/kernel/bpf/trampoline.c
> @@ -415,8 +415,8 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr, bool lock_direct_mut
>                  goto out;
>          }
>  
> -        /* clear all bits except SHARE_IPMODIFY */
> -        tr->flags &= BPF_TRAMP_F_SHARE_IPMODIFY;
> +        /* clear all bits except SHARE_IPMODIFY and TAIL_CALL_CTX */
> +        tr->flags &= (BPF_TRAMP_F_SHARE_IPMODIFY | BPF_TRAMP_F_TAIL_CALL_CTX);
>  
>          if (tlinks[BPF_TRAMP_FEXIT].nr_links ||
>              tlinks[BPF_TRAMP_MODIFY_RETURN].nr_links) {
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index 4ccca1f6c9981..765da3007106a 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -19629,6 +19629,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
>          if (!tr)
>                  return -ENOMEM;
>  
> +        if (tgt_prog && tgt_prog->aux->tail_call_reachable)
> +                tr->flags = BPF_TRAMP_F_TAIL_CALL_CTX;
> +
>          prog->aux->dst_trampoline = tr;
>          return 0;
>  }
> -- 
> 2.41.0
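And for completeness, a hypothetical userspace loader for the sketch earlier
in this mail (plain libbpf; object and program names are the same assumptions
as above, not anything from this patch):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
        struct bpf_object *tgt = bpf_object__open_file("tailcall_loop.bpf.o", NULL);
        struct bpf_object *tr = bpf_object__open_file("fexit_probe.bpf.o", NULL);
        struct bpf_program *fexit_prog;
        int key = 0, entry_fd, map_fd;
        char data[64] = {};
        LIBBPF_OPTS(bpf_test_run_opts, topts,
                    .data_in = data,
                    .data_size_in = sizeof(data));

        if (!tgt || !tr || bpf_object__load(tgt))
                return 1;

        entry_fd = bpf_program__fd(bpf_object__find_program_by_name(tgt, "entry"));
        map_fd = bpf_object__find_map_fd_by_name(tgt, "jmp_table");

        /* Step 3: make the tail call land back in entry(). */
        bpf_map_update_elem(map_fd, &key, &entry_fd, BPF_ANY);

        /* Step 1: attach the fexit program to the subprogram of the
         * already-loaded target program; the target must be set before
         * the tracing object is loaded.
         */
        fexit_prog = bpf_object__find_program_by_name(tr, "fexit_subprog");
        bpf_program__set_attach_target(fexit_prog, entry_fd, "subprog_tail");
        if (bpf_object__load(tr) || !bpf_program__attach(fexit_prog))
                return 1;

        /* Step 2 trigger: run entry() once. On an unfixed kernel this
         * never returns; with this patch the tail call chain stops at
         * MAX_TAIL_CALL_CNT (33).
         */
        bpf_prog_test_run_opts(entry_fd, &topts);
        return 0;
}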