On Thu, Aug 29, 2024 at 05:20:33PM +0200, Oleg Nesterov wrote:

SNIP

> diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
> index f7443e996b1b..e4eaa0363742 100644
> --- a/kernel/trace/trace_uprobe.c
> +++ b/kernel/trace/trace_uprobe.c
> @@ -1364,7 +1364,7 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
>  	return ret;
>  }
> 
> -static void __uprobe_perf_func(struct trace_uprobe *tu,
> +static int __uprobe_perf_func(struct trace_uprobe *tu,
>  			       unsigned long func, struct pt_regs *regs,
>  			       struct uprobe_cpu_buffer **ucbp)
>  {
> @@ -1375,6 +1375,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
>  	void *data;
>  	int size, esize;
>  	int rctx;
> +	int ret = 0;
> 
>  #ifdef CONFIG_BPF_EVENTS
>  	if (bpf_prog_array_valid(call)) {
> @@ -1382,7 +1383,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
> 
>  		ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
>  		if (!ret)
> -			return;
> +			return -1;
>  	}
>  #endif /* CONFIG_BPF_EVENTS */
> 
> @@ -1392,12 +1393,13 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
>  	size = esize + ucb->dsize;
>  	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
>  	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
> -		return;
> +		return -1;
> 
>  	preempt_disable();
>  	head = this_cpu_ptr(call->perf_events);
>  	if (hlist_empty(head))
>  		goto out;

right.. if the event is not added by perf_trace_add on this cpu, it won't
go past this point, so no problem for perf

but the issue is with the bpf program triggered earlier by a return uprobe
created via a perf event, and the patch in [1] seems to fix that (there's
a condensed sketch of the control flow at the end of this mail)

I sent out the bpf selftest that triggers the issue [2]

thanks,
jirka

[1] https://lore.kernel.org/linux-trace-kernel/ME0P300MB0416034322B9915ECD3888649D882@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/
[2] https://lore.kernel.org/bpf/20240829194505.402807-1-jolsa@xxxxxxxxxx/T/#u

> +	ret = 1;
> 
>  	entry = perf_trace_buf_alloc(size, NULL, &rctx);
>  	if (!entry)
> @@ -1421,6 +1423,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
>  					  head, NULL);
>  out:
>  	preempt_enable();
> +	return ret;
>  }
> 
>  /* uprobe profile handler */
> @@ -1439,7 +1442,15 @@ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
>  				struct pt_regs *regs,
>  				struct uprobe_cpu_buffer **ucbp)
>  {
> -	__uprobe_perf_func(tu, func, regs, ucbp);
> +	struct trace_uprobe_filter *filter = tu->tp.event->filter;
> +	struct perf_event *event = list_first_entry(&filter->perf_events,
> +						    struct perf_event, hw.tp_list);
> +
> +	int r = __uprobe_perf_func(tu, func, regs, ucbp);
> +
> +	pr_crit("HANDLER pid=%d consumers_target=%d stored=%d\n",
> +		current->pid, event->hw.target ? event->hw.target->pid : -1, r);
> +
>  }
> 
>  int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
> 
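
PS, just to make the ordering explicit, here's a condensed sketch of the flow
in __uprobe_perf_func() as it looks with the diff above (the _sketch name is
mine, it's not meant to build on its own; error handling and the buffer setup
are elided, and the calls/fields are the ones visible in the quoted patch):

static int __uprobe_perf_func_sketch(struct trace_uprobe *tu,
				     unsigned long func, struct pt_regs *regs,
				     struct uprobe_cpu_buffer **ucbp)
{
	struct trace_event_call *call = trace_probe_event_call(&tu->tp);
	struct hlist_head *head;
	int ret = 0;

#ifdef CONFIG_BPF_EVENTS
	/*
	 * 1) bpf programs attached to the event run first, regardless of
	 *    the per-cpu perf state
	 */
	if (bpf_prog_array_valid(call) &&
	    !bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run))
		return -1;
#endif

	/* 2) only now is the per-cpu perf_events list consulted ... */
	preempt_disable();
	head = this_cpu_ptr(call->perf_events);
	if (hlist_empty(head))
		goto out;	/* ... so perf itself records nothing here */

	ret = 1;
	/* perf_trace_buf_alloc()/perf_trace_buf_submit() elided */
out:
	preempt_enable();
	return ret;
}

so the bpf program array is consulted before the hlist_empty() check, which
is why the perf-side filtering never gets a chance to stop the bpf program
in the uretprobe case.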