Re: [PATCH bpf-next 2/4] bpf, x64: Fix tailcall hierarchy

On Thu, Jan 4, 2024 at 10:16 PM Leon Hwang <hffilwlqm@xxxxxxxxx> wrote:
>
>
>
> On 5/1/24 12:15, Alexei Starovoitov wrote:
> > On Thu, Jan 4, 2024 at 6:23 AM Leon Hwang <hffilwlqm@xxxxxxxxx> wrote:
> >>
> >>
> >> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> >> index fe30b9ebb8de4..67fa337fc2e0c 100644
> >> --- a/arch/x86/net/bpf_jit_comp.c
> >> +++ b/arch/x86/net/bpf_jit_comp.c
> >> @@ -259,7 +259,7 @@ struct jit_context {
> >>  /* Number of bytes emit_patch() needs to generate instructions */
> >>  #define X86_PATCH_SIZE         5
> >>  /* Number of bytes that will be skipped on tailcall */
> >> -#define X86_TAIL_CALL_OFFSET   (11 + ENDBR_INSN_SIZE)
> >> +#define X86_TAIL_CALL_OFFSET   (22 + ENDBR_INSN_SIZE)
> >>
> >>  static void push_r12(u8 **pprog)
> >>  {
> >> @@ -406,14 +406,21 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
> >>          */
> >>         emit_nops(&prog, X86_PATCH_SIZE);
> >>         if (!ebpf_from_cbpf) {
> >> -               if (tail_call_reachable && !is_subprog)
> >> +               if (tail_call_reachable && !is_subprog) {
> >>                         /* When it's the entry of the whole tailcall context,
> >>                          * zeroing rax means initialising tail_call_cnt.
> >>                          */
> >> -                       EMIT2(0x31, 0xC0); /* xor eax, eax */
> >> -               else
> >> -                       /* Keep the same instruction layout. */
> >> -                       EMIT2(0x66, 0x90); /* nop2 */
> >> +                       EMIT2(0x31, 0xC0);       /* xor eax, eax */
> >> +                       EMIT1(0x50);             /* push rax */
> >> +                       /* Make rax as ptr that points to tail_call_cnt. */
> >> +                       EMIT3(0x48, 0x89, 0xE0); /* mov rax, rsp */
> >> +                       EMIT1_off32(0xE8, 2);    /* call main prog */
> >> +                       EMIT1(0x59);             /* pop rcx, get rid of tail_call_cnt */
> >> +                       EMIT1(0xC3);             /* ret */
> >> +               } else {
> >> +                       /* Keep the same instruction size. */
> >> +                       emit_nops(&prog, 13);
> >> +               }
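
A rough sketch of the resulting entry layout, as I read the hunk above
(mnemonics and byte counts are hand-decoded from the EMIT macros, and the
"body" label is only for illustration):

        endbr64                  /* ENDBR_INSN_SIZE */
        nop5                     /* X86_PATCH_SIZE, fentry patch site */
        xor  eax, eax            /* 2 bytes: tail_call_cnt = 0 */
        push rax                 /* 1 byte : tail_call_cnt lives on the stack */
        mov  rax, rsp            /* 3 bytes: rax = &tail_call_cnt */
        call body                /* 5 bytes: rel32 = 2, skips the pop/ret below */
        pop  rcx                 /* 1 byte : drop tail_call_cnt on return */
        ret                      /* 1 byte */
  body:
        push rbp                 /* 1 byte */
        mov  rbp, rsp            /* 3 bytes */
        ...                      /* tail calls land here */

The new sequence is 2 + 1 + 3 + 5 + 1 + 1 = 13 bytes, matching the 13 nops in
the else branch, and the tail call target moves to 5 + 13 + 1 + 3 = 22 bytes
past the ENDBR, hence the new X86_TAIL_CALL_OFFSET.
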
> >
> > I'm afraid the extra call breaks stack unwinding and many other things.
>
> I was worried about it. But I'm not sure how it breaks stack unwinding.
>
> However, without the extra call, I've tried another approach:
>
> * [RFC PATCH bpf-next 1/3] bpf, x64: Fix tailcall hierarchy
>   https://lore.kernel.org/bpf/20231005145814.83122-2-hffilwlqm@xxxxxxxxx/
>
> It also propagates tail_call_cnt_ptr, but it is more complicated:
>
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index 8c10d9abc..001c5e4b7 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -313,24 +332,15 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
>                           bool tail_call_reachable, bool is_subprog,
>                           bool is_exception_cb)
>  {
> +       int tcc_ptr_off = round_up(stack_depth, 8) + 8;
> +       int tcc_off = tcc_ptr_off + 8;
>         u8 *prog = *pprog;
>
>         /* BPF trampoline can be made to work without these nops,
>          * but let's waste 5 bytes for now and optimize later
>          */
>         EMIT_ENDBR();
> -       memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
> -       prog += X86_PATCH_SIZE;
> -       if (!ebpf_from_cbpf) {
> -               if (tail_call_reachable && !is_subprog)
> -                       /* When it's the entry of the whole tailcall context,
> -                        * zeroing rax means initialising tail_call_cnt.
> -                        */
> -                       EMIT2(0x31, 0xC0); /* xor eax, eax */
> -               else
> -                       /* Keep the same instruction layout. */
> -                       EMIT2(0x66, 0x90); /* nop2 */
> -       }
> +       emit_nops(&prog, X86_PATCH_SIZE);
>         /* Exception callback receives FP as third parameter */
>         if (is_exception_cb) {
>                 EMIT3(0x48, 0x89, 0xF4); /* mov rsp, rsi */
> @@ -347,15 +357,52 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
>                 EMIT1(0x55);             /* push rbp */
>                 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
>         }
> +       if (!ebpf_from_cbpf) {
> +               if (tail_call_reachable && !is_subprog) {
> +                       /* Make rax as ptr that points to tail_call_cnt. */
> +                       EMIT3(0x48, 0x89, 0xE8);          /* mov rax, rbp */
> +                       EMIT2_off32(0x48, 0x2D, tcc_off); /* sub rax, tcc_off */
> +                       /* When it's the entry of the whole tail call context,
> +                        * storing 0 means initialising tail_call_cnt.
> +                        */
> +                       EMIT2_off32(0xC7, 0x00, 0);       /* mov dword ptr [rax], 0 */
> +               } else {
> +                       /* Keep the same instruction layout. */
> +                       emit_nops(&prog, 3);
> +                       emit_nops(&prog, 6);
> +                       emit_nops(&prog, 6);
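
For reference, the three instructions in the entry case above decode to the
following, as I count them from the EMIT macros (with tcc_off =
round_up(stack_depth, 8) + 16):

        mov  rax, rbp               /* 3 bytes */
        sub  rax, tcc_off           /* 6 bytes: rax = &tail_call_cnt in this frame */
        mov  dword ptr [rax], 0     /* 6 bytes: tail_call_cnt = 0 */

so the non-entry case has to pad 3 + 6 + 6 = 15 nop bytes to keep the same
layout.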

An extra 15 nops in the prologue of every bpf program (tailcall or not)
is too high a price to pay.

Think of a simpler fix, either on the verifier side or
a simple approach that all JITs can easily implement.




