On Wed, Jan 19, 2022 at 11:57:50PM +0900, Masami Hiramatsu wrote: SNIP > +static int kprobe_link_prog_run(struct bpf_kprobe_link *kprobe_link, > + struct pt_regs *regs) > +{ > + struct bpf_trace_run_ctx run_ctx; > + struct bpf_run_ctx *old_run_ctx; > + int err; > + > + if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) { > + err = 0; > + goto out; > + } > + > + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); > + run_ctx.bpf_cookie = kprobe_link->bpf_cookie; > + > + rcu_read_lock(); > + migrate_disable(); > + err = bpf_prog_run(kprobe_link->link.prog, regs); > + migrate_enable(); > + rcu_read_unlock(); > + > + bpf_reset_run_ctx(old_run_ctx); > + > + out: > + __this_cpu_dec(bpf_prog_active); > + return err; > +} > + > +static void kprobe_link_entry_handler(struct fprobe *fp, unsigned long entry_ip, > + struct pt_regs *regs) > +{ > + struct bpf_kprobe_link *kprobe_link; > + > + /* > + * Because fprobe's regs->ip is set to the next instruction of > + * dynamic-ftrace instruction, correct entry ip must be set, so > + * that the bpf program can access entry address via regs as same > + * as kprobes. > + */ > + instruction_pointer_set(regs, entry_ip); ok, so this actually does the stall for me.. it changes the return address back to repeat the call again but I think it's good idea to carry the original ip in regs (for bpf_get_func_ip helper) so I think we need to save it first and restore after the callback I'll make the fix and add cookie change Andrii asked for on top of your ftrace changes and let you know thanks, jirka > + kprobe_link = container_of(fp, struct bpf_kprobe_link, fp); > + kprobe_link_prog_run(kprobe_link, regs); > +} > + SNIP