Andrii Nakryiko wrote:
> Add BPF instructions for working with per-CPU data. These instructions
> are internal-only and users are not allowed to use them directly. They
> will only be used for internal inlining optimizations for now.
>
> Two different instructions are added. One, with the BPF_MEM_PERCPU
> opcode, performs memory dereferencing of a per-CPU "address" (which is
> actually an offset). This one is useful when inlined logic needs to
> load data stored in per-CPU storage (bpf_get_smp_processor_id() is one
> such example).
>
> The other, with the BPF_ADDR_PERCPU opcode, performs resolution of a
> per-CPU address (offset) stored in a register. This one is useful
> anywhere per-CPU data is not read, but rather returned to the user as
> an absolute raw memory pointer (e.g., in bpf_map_lookup_elem() helper
> inlinings).
>
> The BPF disassembler is also taught to recognize them, to support
> dumping the final (non-JIT'ed) BPF assembly code.
>
> Add an arch-specific way for BPF JITs to mark support for these
> instructions.
>
> This patch also adds support for these instructions in the x86-64 BPF
> JIT.
>
> Signed-off-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
> ---
>  arch/x86/net/bpf_jit_comp.c | 29 +++++++++++++++++++++++++++++
>  include/linux/filter.h      | 27 +++++++++++++++++++++++++++
>  kernel/bpf/core.c           |  5 +++++
>  kernel/bpf/disasm.c         | 33 ++++++++++++++++++++++++++-------
>  4 files changed, 87 insertions(+), 7 deletions(-)
>
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index 3b639d6f2f54..610bbedaae70 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -1910,6 +1910,30 @@ st:                     if (is_imm8(insn->off))
>                          }
>                          break;
>
> +               /* internal-only per-cpu zero-extending memory load */
> +               case BPF_LDX | BPF_MEM_PERCPU | BPF_B:
> +               case BPF_LDX | BPF_MEM_PERCPU | BPF_H:
> +               case BPF_LDX | BPF_MEM_PERCPU | BPF_W:
> +               case BPF_LDX | BPF_MEM_PERCPU | BPF_DW:
> +                       insn_off = insn->off;
> +                       EMIT1(0x65); /* gs segment modifier */
> +                       emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn_off);
> +                       break;
> +
> +               /* internal-only load-effective-address-of per-cpu offset */
> +               case BPF_LDX | BPF_ADDR_PERCPU | BPF_DW: {
> +                       u32 off = (u32)(void *)&this_cpu_off;
> +
> +                       /* mov <dst>, <src> (if necessary) */
> +                       EMIT_mov(dst_reg, src_reg);
> +
> +                       /* add <dst>, gs:[<off>] */
> +                       EMIT2(0x65, add_1mod(0x48, dst_reg));
> +                       EMIT3(0x03, add_1reg(0x04, dst_reg), 0x25);
> +                       EMIT(off, 4);
> +
> +                       break;
> +               }
>                 case BPF_STX | BPF_ATOMIC | BPF_W:
>                 case BPF_STX | BPF_ATOMIC | BPF_DW:
>                         if (insn->imm == (BPF_AND | BPF_FETCH) ||

[..]

> +/* Per-CPU zero-extending memory load (internal-only) */
> +#define BPF_LDX_MEM_PERCPU(SIZE, DST, SRC, OFF)                    \
> +       ((struct bpf_insn) {                                        \
> +               .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM_PERCPU, \
> +               .dst_reg = DST,                                     \
> +               .src_reg = SRC,                                     \
> +               .off   = OFF,                                       \
> +               .imm   = 0 })
> +
> +/* Load effective address of a given per-CPU offset */
> +#define BPF_LDX_ADDR_PERCPU(DST, SRC, OFF)                         \

Do you need OFF here? It seems the JIT above always resolves against
&this_cpu_off and never reads insn->off for this encoding.

> +       ((struct bpf_insn) {                                        \
> +               .code  = BPF_LDX | BPF_DW | BPF_ADDR_PERCPU,        \
> +               .dst_reg = DST,                                     \
> +               .src_reg = SRC,                                     \
> +               .off   = OFF,                                       \
> +               .imm   = 0 })
> +
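
To expand on the OFF question above: if .off really is dead for this
encoding (the JIT hardcodes &this_cpu_off), a two-argument form would
make that contract explicit and keep callers from passing a value that
is silently ignored. A minimal sketch, assuming OFF is indeed unused:

/* Load effective address of the per-CPU offset held in SRC */
#define BPF_LDX_ADDR_PERCPU(DST, SRC)                               \
       ((struct bpf_insn) {                                         \
               .code  = BPF_LDX | BPF_DW | BPF_ADDR_PERCPU,         \
               .dst_reg = DST,                                      \
               .src_reg = SRC,                                      \
               .off   = 0,                                          \
               .imm   = 0 })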
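
For my own understanding, I sketched how the bpf_get_smp_processor_id()
inlining mentioned in the commit message could use BPF_MEM_PERCPU. This
is hypothetical verifier-fixup code; it assumes the CPU number lives in
the per-CPU variable pcpu_hot.cpu_number (as on current x86-64) and
that its per-CPU offset fits in 32 bits:

/* r0 = per-CPU offset of the CPU number variable */
insn_buf[0] = BPF_MOV32_IMM(BPF_REG_0, (u32)(unsigned long)&pcpu_hot.cpu_number);
/* r0 = *(u32 *)(per-CPU base + r0), i.e. this CPU's number */
insn_buf[1] = BPF_LDX_MEM_PERCPU(BPF_W, BPF_REG_0, BPF_REG_0, 0);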
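
Similarly, I'd expect BPF_ADDR_PERCPU to be the final step of an
inlined per-CPU map lookup: once the lookup code has loaded the per-CPU
offset of the value into a register, a single instruction turns it into
an absolute pointer. Again just a sketch with made-up register choices,
using the three-argument macro as posted (with OFF = 0):

/* r0 = per-CPU offset of the value, loaded from the map's storage */
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
/* r0 += this CPU's per-CPU base; r0 is now a directly usable pointer */
insn_buf[1] = BPF_LDX_ADDR_PERCPU(BPF_REG_0, BPF_REG_0, 0);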