On Thu, Nov 14, 2019 at 5:04 PM Daniel Borkmann <daniel@xxxxxxxxxxxxx> wrote:
>
> Add BPF_MOD_{NOP_TO_JUMP,JUMP_TO_JUMP,JUMP_TO_NOP} patching for x86
> JIT in order to be able to patch direct jumps or nop them out. We need
> this facility in order to patch tail call jumps and in later work also
> BPF static keys.
>
> Signed-off-by: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
> ---

just naming nits, looks good otherwise

>  arch/x86/net/bpf_jit_comp.c | 64 ++++++++++++++++++++++++++-----------
>  include/linux/bpf.h         |  6 ++++
>  2 files changed, 52 insertions(+), 18 deletions(-)
>
> diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
> index 2e586f579945..66921f2aeece 100644
> --- a/arch/x86/net/bpf_jit_comp.c
> +++ b/arch/x86/net/bpf_jit_comp.c
> @@ -203,8 +203,9 @@ struct jit_context {
>  /* Maximum number of bytes emitted while JITing one eBPF insn */
>  #define BPF_MAX_INSN_SIZE	128
>  #define BPF_INSN_SAFETY		64
> -/* number of bytes emit_call() needs to generate call instruction */
> -#define X86_CALL_SIZE		5
> +
> +/* Number of bytes emit_patchable() needs to generate instructions */
> +#define X86_PATCHABLE_SIZE	5
>
>  #define PROLOGUE_SIZE		25
>
> @@ -215,7 +216,7 @@ struct jit_context {
>  static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
>  {
>  	u8 *prog = *pprog;
> -	int cnt = X86_CALL_SIZE;
> +	int cnt = X86_PATCHABLE_SIZE;
>
>  	/* BPF trampoline can be made to work without these nops,
>  	 * but let's waste 5 bytes for now and optimize later
> @@ -480,64 +481,91 @@ static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
>  	*pprog = prog;
>  }
>
> -static int emit_call(u8 **pprog, void *func, void *ip)
> +static int emit_patchable(u8 **pprog, void *func, void *ip, u8 b1)

I'd strongly prefer opcode instead of b1 :)

also would emit_patch() be a terrible name?

>  {
>  	u8 *prog = *pprog;
>  	int cnt = 0;
>  	s64 offset;
>

[...]

>  	case BPF_MOD_CALL_TO_NOP:
> -		if (memcmp(ip, old_insn, X86_CALL_SIZE))
> +	case BPF_MOD_JUMP_TO_NOP:
> +		if (memcmp(ip, old_insn, X86_PATCHABLE_SIZE))
>  			goto out;
> -		text_poke_bp(ip, ideal_nops[NOP_ATOMIC5], X86_CALL_SIZE, NULL);
> +		text_poke_bp(ip, ideal_nops[NOP_ATOMIC5], X86_PATCHABLE_SIZE,

maybe keep it shorter with X86_PATCH_SIZE?

> +			     NULL);
>  		break;
>  	}
>  	ret = 0;

[...]
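
To be concrete about the naming nits above, something like the below is
roughly what I have in mind (untested sketch, just to illustrate the
naming; emit_jump() and the exact error handling are my guesses, not
taken from your patch, and 0xE8/0xE9 are simply the x86 call rel32 and
jmp rel32 opcodes):

static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
{
	u8 *prog = *pprog;
	int cnt = 0;
	s64 offset;

	/* rel32 is relative to the end of the 5-byte patched insn */
	offset = func - (ip + X86_PATCH_SIZE);
	if (!is_simm32(offset))
		return -EINVAL;
	EMIT1_off32(opcode, offset);
	*pprog = prog;
	return 0;
}

static int emit_call(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE8); /* call rel32 */
}

static int emit_jump(u8 **pprog, void *func, void *ip)
{
	return emit_patch(pprog, func, ip, 0xE9); /* jmp rel32 */
}

That way emit_call()/emit_jump() stay trivial wrappers and the opcode
byte is spelled out only in one place per instruction kind.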