No functional change intended; like the existing macro BPF_CALL_IMM, add BPF_CALL_FUNC/BPF_CALL_FUNC_ARGS helper macros to simplify the code. Signed-off-by: Tao Chen <chen.dylane@xxxxxxxxx> --- arch/sparc/net/bpf_jit_comp_64.c | 2 +- arch/x86/net/bpf_jit_comp.c | 2 +- arch/x86/net/bpf_jit_comp32.c | 5 ++--- include/linux/filter.h | 4 ++++ kernel/bpf/core.c | 6 +++--- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 73bf0aea8baf..076b1f216360 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1213,7 +1213,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) /* function call */ case BPF_JMP | BPF_CALL: { - u8 *func = ((u8 *)__bpf_call_base) + imm; + u8 *func = BPF_CALL_FUNC(imm); ctx->saw_call = true; diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 06b080b61aa5..052e5cc65fc0 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -2126,7 +2126,7 @@ st: if (is_imm8(insn->off)) case BPF_JMP | BPF_CALL: { u8 *ip = image + addrs[i - 1]; - func = (u8 *) __bpf_call_base + imm32; + func = BPF_CALL_FUNC(imm32); if (tail_call_reachable) { LOAD_TAIL_CALL_CNT_PTR(bpf_prog->aux->stack_depth); ip += 7; diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index de0f9e5f9f73..f7277639bd2c 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -1627,8 +1627,7 @@ static int emit_kfunc_call(const struct bpf_prog *bpf_prog, u8 *end_addr, /* mov dword ptr [ebp+off],eax */ if (fm->ret_size) end_addr -= 3; - - jmp_offset = (u8 *)__bpf_call_base + insn->imm - end_addr; + jmp_offset = BPF_CALL_FUNC(insn->imm) - end_addr; if (!is_simm32(jmp_offset)) { pr_err("unsupported BPF kernel function jmp_offset:%lld\n", jmp_offset); @@ -2103,7 +2102,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, break; } - func = (u8 *) __bpf_call_base + imm32; + func = BPF_CALL_FUNC(imm32); 
jmp_offset = func - (image + addrs[i]); if (!imm32 || !is_simm32(jmp_offset)) { diff --git a/include/linux/filter.h b/include/linux/filter.h index 99b6fc83825b..d06526decc6d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -461,6 +461,10 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn) #define BPF_CALL_IMM(x) ((void *)(x) - (void *)__bpf_call_base) +#define BPF_CALL_FUNC(x) ((x) + (u8 *)__bpf_call_base) + +#define BPF_CALL_FUNC_ARGS(x) ((x) + (u8 *)__bpf_call_base_args) + #define BPF_EMIT_CALL(FUNC) \ ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 4e07cc057d6f..f965f0d586f3 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1278,7 +1278,7 @@ int bpf_jit_get_func_addr(const struct bpf_prog *prog, * and the helper with imm relative to it are both in core * kernel. */ - addr = (u8 *)__bpf_call_base + imm; + addr = BPF_CALL_FUNC(imm); } *func_addr = (unsigned long)addr; @@ -2007,12 +2007,12 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn) * preserves BPF_R6-BPF_R9, and stores return value * into BPF_R0. */ - BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3, + BPF_R0 = BPF_CALL_FUNC(insn->imm)(BPF_R1, BPF_R2, BPF_R3, BPF_R4, BPF_R5); CONT; JMP_CALL_ARGS: - BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, + BPF_R0 = BPF_CALL_FUNC_ARGS(insn->imm)(BPF_R1, BPF_R2, BPF_R3, BPF_R4, BPF_R5, insn + insn->off + 1); -- 2.43.0