When we have multiple functions attached to a trampoline, we need to propagate the function's address to the BPF program. Add a new BPF_TRAMP_F_IP_ARG flag to the arch_prepare_bpf_trampoline function that will store the original caller's address before the function's arguments. Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx> --- arch/x86/net/bpf_jit_comp.c | 18 ++++++++++++++---- include/linux/bpf.h | 5 +++++ 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index b77e6bd78354..d2425c18272a 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1951,7 +1951,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i void *orig_call) { int ret, i, cnt = 0, nr_args = m->nr_args; - int stack_size = nr_args * 8; + int stack_size = nr_args * 8, ip_arg = 0; struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY]; struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT]; struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN]; @@ -1975,6 +1975,9 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i */ orig_call += X86_PATCH_SIZE; + if (flags & BPF_TRAMP_F_IP_ARG) + stack_size += 8; + prog = image; EMIT1(0x55); /* push rbp */ @@ -1982,7 +1985,14 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ EMIT1(0x53); /* push rbx */ - save_regs(m, &prog, nr_args, stack_size); + if (flags & BPF_TRAMP_F_IP_ARG) { + emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); + EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE); /* sub $X86_PATCH_SIZE,%rax*/ + emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size); + ip_arg = 8; + } + + save_regs(m, &prog, nr_args, stack_size - ip_arg); if (flags & BPF_TRAMP_F_CALL_ORIG) { /* arg1: mov rdi, im */ @@ -2011,7 +2021,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i } if (flags 
& BPF_TRAMP_F_CALL_ORIG) { - restore_regs(m, &prog, nr_args, stack_size); + restore_regs(m, &prog, nr_args, stack_size - ip_arg); if (flags & BPF_TRAMP_F_ORIG_STACK) { emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); @@ -2052,7 +2062,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i } if (flags & BPF_TRAMP_F_RESTORE_REGS) - restore_regs(m, &prog, nr_args, stack_size); + restore_regs(m, &prog, nr_args, stack_size - ip_arg); /* This needs to be done regardless. If there were fmod_ret programs, * the return value is only updated on the stack and still needs to be diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 16fc600503fb..6cbf3c81c650 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -559,6 +559,11 @@ struct btf_func_model { */ #define BPF_TRAMP_F_ORIG_STACK BIT(3) +/* First argument is IP address of the caller. Makes sense for fentry/fexit + * programs only. + */ +#define BPF_TRAMP_F_IP_ARG BIT(4) + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2 */ -- 2.31.1