Andrii Nakryiko wrote:
> If BPF JIT supports per-CPU MOV instruction, inline bpf_get_smp_processor_id()
> to eliminate unnecessary function calls.
>
> Signed-off-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
> ---
>  kernel/bpf/verifier.c | 24 ++++++++++++++++++++++++
>  1 file changed, 24 insertions(+)
>
> diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
> index edb650667f44..af0274b090bb 100644
> --- a/kernel/bpf/verifier.c
> +++ b/kernel/bpf/verifier.c
> @@ -20072,6 +20072,30 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
>  			goto next_insn;
>  		}
>  
> +#ifdef CONFIG_X86_64
> +		/* Implement bpf_get_smp_processor_id() inline. */
> +		if (insn->imm == BPF_FUNC_get_smp_processor_id &&
> +		    prog->jit_requested && bpf_jit_supports_percpu_insn()) {
> +			/* BPF_FUNC_get_smp_processor_id inlining is an
> +			 * optimization, so if pcpu_hot.cpu_number is ever
> +			 * changed in some incompatible and hard to support
> +			 * way, it's fine to back out this inlining logic
> +			 */
> +			insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
> +			insn_buf[1] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
> +			insn_buf[2] = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0);
> +			cnt = 3;
> +
> +			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
> +			if (!new_prog)
> +				return -ENOMEM;
> +
> +			delta += cnt - 1;
> +			env->prog = prog = new_prog;
> +			insn = new_prog->insnsi + i + delta;
> +			goto next_insn;
> +		}
> +#endif
>  		/* Implement bpf_get_func_arg inline. */
>  		if (prog_type == BPF_PROG_TYPE_TRACING &&
>  		    insn->imm == BPF_FUNC_get_func_arg) {
> --
> 2.43.0
>
>

Acked-by: John Fastabend <john.fastabend@xxxxxxxxx>
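
For anyone reading along from the program side, a minimal sketch of the kind of
program this helps is below. It is not part of the patch; the tracepoint,
program name, and bpf_printk() output are purely illustrative. Any call to
bpf_get_smp_processor_id() like the one here is what do_misc_fixups() now
rewrites into the three-instruction per-CPU load sequence above (on x86-64,
when the JIT is requested and bpf_jit_supports_percpu_insn() is true), so the
JITed code no longer pays for a helper call:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative libbpf-style BPF program; names are made up for the example. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_getpid")
int log_cpu(void *ctx)
{
	/* Before this patch: an out-of-line helper call.
	 * After: inlined load of pcpu_hot.cpu_number for the current CPU.
	 */
	__u32 cpu = bpf_get_smp_processor_id();

	bpf_printk("getpid() called on cpu %u", cpu);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";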