The current bpf_get_func_ip_kprobe helper does not work properly
when used in a BPF program triggered by the new multi kprobes.
We can't use the kprobe's func_addr in the bpf_get_func_ip_kprobe
helper, because there are multiple functions registered for a
single kprobe object.

Add a new per-cpu variable current_ftrace_multi_addr and an extra
address field in the kretprobe_instance object to keep the currently
traced function address for each cpu, for both the kprobe handler
and the kretprobe trampoline.

The address value is set/passed as follows, for kprobe:

	kprobe_ftrace_multi_handler
	{
		old = kprobe_ftrace_multi_addr_set(ip);
		handler..
		kprobe_ftrace_multi_addr_set(old);
	}

For kretprobe:

	kprobe_ftrace_multi_handler
	{
		old = kprobe_ftrace_multi_addr_set(ip);
		...
		pre_handler_kretprobe
		{
			ri->ftrace_multi_addr = kprobe_ftrace_multi_addr();
		}
		...
		kprobe_ftrace_multi_addr_set(old);
	}

	__kretprobe_trampoline_handler
	{
		prev_func_addr = kprobe_ftrace_multi_addr_set(ri->ftrace_multi_addr);
		handler..
		kprobe_ftrace_multi_addr_set(prev_func_addr);
	}

Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
---
 arch/x86/kernel/kprobes/ftrace.c |  3 +++
 include/linux/kprobes.h          | 26 ++++++++++++++++++++++++++
 kernel/kprobes.c                 |  6 ++++++
 kernel/trace/bpf_trace.c         |  7 ++++++-
 4 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index ac4d256b89c6..8caaa58c3a64 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -72,6 +72,7 @@ NOKPROBE_SYMBOL(kprobe_ftrace_handler);
 void kprobe_ftrace_multi_handler(unsigned long ip, unsigned long parent_ip,
 				 struct ftrace_ops *ops, struct ftrace_regs *fregs)
 {
+	unsigned long old;
 	struct kprobe *p;
 	int bit;
 
@@ -79,8 +80,10 @@ void kprobe_ftrace_multi_handler(unsigned long ip, unsigned long parent_ip,
 	if (bit < 0)
 		return;
 
+	old = kprobe_ftrace_multi_addr_set(ip);
 	p = container_of(ops, struct kprobe, multi.ops);
 	ftrace_handler(p, ip, fregs);
+	kprobe_ftrace_multi_addr_set(old);
 
 	ftrace_test_recursion_unlock(bit);
 }
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index a31da6202b5c..3f0522b9538b 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -191,6 +191,7 @@ struct kretprobe_instance {
 	struct kretprobe_holder *rph;
 	kprobe_opcode_t *ret_addr;
 	void *fp;
+	unsigned long ftrace_multi_addr;
 	char data[];
 };
 
@@ -387,16 +388,37 @@ static inline void wait_for_kprobe_optimizer(void) { }
 #endif /* CONFIG_OPTPROBES */
 
 #ifdef CONFIG_KPROBES_ON_FTRACE
+DECLARE_PER_CPU(unsigned long, current_ftrace_multi_addr);
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 				  struct ftrace_ops *ops, struct ftrace_regs *fregs);
 extern void kprobe_ftrace_multi_handler(unsigned long ip, unsigned long parent_ip,
 					struct ftrace_ops *ops, struct ftrace_regs *fregs);
 extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
+
+static inline unsigned long kprobe_ftrace_multi_addr(void)
+{
+	return __this_cpu_read(current_ftrace_multi_addr);
+}
+static inline unsigned long kprobe_ftrace_multi_addr_set(unsigned long addr)
+{
+	unsigned long old = __this_cpu_read(current_ftrace_multi_addr);
+
+	__this_cpu_write(current_ftrace_multi_addr, addr);
+	return old;
+}
 #else
 static inline int arch_prepare_kprobe_ftrace(struct kprobe *p)
 {
 	return -EINVAL;
 }
+static inline unsigned long kprobe_ftrace_multi_addr_set(unsigned long addr)
+{
+	return 0;
+}
+static inline unsigned long kprobe_ftrace_multi_addr(void)
+{
+	return 0;
+}
 #endif /* CONFIG_KPROBES_ON_FTRACE */
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
@@ -514,6 +536,10 @@ static inline int kprobe_get_kallsym(unsigned int symnum, unsigned long *value,
 {
 	return -ERANGE;
 }
+static inline unsigned long kprobe_ftrace_multi_addr(void)
+{
+	return 0;
+}
 #endif /* CONFIG_KPROBES */
 
 static inline int disable_kretprobe(struct kretprobe *rp)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 04fc411ca30c..6ba249f3a0cb 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1064,6 +1064,8 @@ static bool in_kretprobe_blacklist(void *addr)
 }
 
 #ifdef CONFIG_KPROBES_ON_FTRACE
+DEFINE_PER_CPU(unsigned long, current_ftrace_multi_addr);
+
 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
 	.func = kprobe_ftrace_handler,
 	.flags = FTRACE_OPS_FL_SAVE_REGS,
@@ -2106,11 +2108,14 @@ unsigned long __kretprobe_trampoline_handler(struct pt_regs *regs,
 		rp = get_kretprobe(ri);
 		if (rp && rp->handler) {
 			struct kprobe *prev = kprobe_running();
+			unsigned long prev_func_addr;
 
+			prev_func_addr = kprobe_ftrace_multi_addr_set(ri->ftrace_multi_addr);
 			__this_cpu_write(current_kprobe, &rp->kp);
 			ri->ret_addr = correct_ret_addr;
 			rp->handler(ri, regs);
 			__this_cpu_write(current_kprobe, prev);
+			kprobe_ftrace_multi_addr_set(prev_func_addr);
 		}
 		if (first == node)
 			break;
@@ -2161,6 +2166,7 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 	}
 
 	arch_prepare_kretprobe(ri, regs);
+	ri->ftrace_multi_addr = kprobe_ftrace_multi_addr();
 
 	__llist_add(&ri->llist, &current->kretprobe_instances);
 
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 25631253084a..39f4d476cfca 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1026,7 +1026,12 @@ BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
 {
 	struct kprobe *kp = kprobe_running();
 
-	return kp ? (uintptr_t)kp->func_addr : 0;
+	if (!kp)
+		return 0;
+	if (kprobe_ftrace_multi(kp))
+		return (uintptr_t) kprobe_ftrace_multi_addr();
+	else
+		return (uintptr_t) kp->func_addr;
 }
 
 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
-- 
2.33.1
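
---

For reference (not part of the patch), here is a minimal BPF-side sketch of a
program that consumes this change through bpf_get_func_ip(). The probed symbol
and the attach path are assumptions for illustration only; the multi-kprobe
attach API itself is introduced elsewhere in this series, and this program is
an ordinary kprobe program as far as the verifier is concerned:

	// SPDX-License-Identifier: GPL-2.0
	/* Hypothetical example: a kprobe program calling bpf_get_func_ip(),
	 * which lands in bpf_get_func_ip_kprobe() in the kernel. When the
	 * program is attached through the multi-kprobe link, the helper now
	 * returns the per-cpu current_ftrace_multi_addr value instead of
	 * kp->func_addr, i.e. the address of whichever traced function fired.
	 */
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char LICENSE[] SEC("license") = "GPL";

	SEC("kprobe/bpf_fentry_test1")	/* placeholder symbol; attach to many via the multi link */
	int trace_entry(struct pt_regs *ctx)
	{
		unsigned long ip = bpf_get_func_ip(ctx);

		bpf_printk("func ip: 0x%lx", ip);
		return 0;
	}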