On 2024/8/1 5:42, Andrii Nakryiko wrote:
> From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
>
> With uprobe_unregister() having grown a synchronize_srcu(), it becomes
> fairly slow to call. Esp. since both users of this API call it in a
> loop.
>
> Peel off the sync_srcu() and do it once, after the loop.
>
> With recent uprobe_register()'s error handling reusing full
> uprobe_unregister() call, we need to be careful about returning to the
> caller before we have a guarantee that partially attached consumer won't
> be called anymore. So add uprobe_unregister_sync() in the error handling
> path. This is an unlikely slow path and this should be totally fine to
> be slow in the case of an failed attach.
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
> Co-developed-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
> Signed-off-by: Andrii Nakryiko <andrii@xxxxxxxxxx>
> ---
>  include/linux/uprobes.h                              |  8 ++++++--
>  kernel/events/uprobes.c                              | 18 ++++++++++++++----
>  kernel/trace/bpf_trace.c                             |  5 ++++-
>  kernel/trace/trace_uprobe.c                          |  6 +++++-
>  .../selftests/bpf/bpf_testmod/bpf_testmod.c          |  3 ++-
>  5 files changed, 31 insertions(+), 9 deletions(-)
>
> diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
> index a1686c1ebcb6..8f1999eb9d9f 100644
> --- a/include/linux/uprobes.h
> +++ b/include/linux/uprobes.h
> @@ -105,7 +105,8 @@ extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
>  extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
>  extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
>  extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
> -extern void uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc);
> +extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
> +extern void uprobe_unregister_sync(void);

[...]

>  static inline void
> -uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
> +uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
> +{
> +}
> +static inline void uprobes_unregister_sync(void)

*uprobes*_unregister_sync: is this a typo? The declaration above adds
uprobe_unregister_sync().

>  {
>  }
>  static inline int uprobe_mmap(struct vm_area_struct *vma)
> diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
> index 3b42fd355256..b0488d356399 100644
> --- a/kernel/events/uprobes.c
> +++ b/kernel/events/uprobes.c
> @@ -1089,11 +1089,11 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
>  }
>
>  /**
> - * uprobe_unregister - unregister an already registered probe.
> + * uprobe_unregister_nosync - unregister an already registered probe.
>   * @uprobe: uprobe to remove
>   * @uc: identify which probe if multiple probes are colocated.
>   */
> -void uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
> +void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
>  {
>  	int err;
>
> @@ -1109,10 +1109,14 @@ void uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
>  		return;
>
>  	put_uprobe(uprobe);
> +}
> +EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);
>
> +void uprobe_unregister_sync(void)
> +{
>  	synchronize_srcu(&uprobes_srcu);
>  }
> -EXPORT_SYMBOL_GPL(uprobe_unregister);
> +EXPORT_SYMBOL_GPL(uprobe_unregister_sync);
>
>  /**
>   * uprobe_register - register a probe
> @@ -1170,7 +1174,13 @@ struct uprobe *uprobe_register(struct inode *inode,
>  	up_write(&uprobe->register_rwsem);
>
>  	if (ret) {
> -		uprobe_unregister(uprobe, uc);
> +		uprobe_unregister_nosync(uprobe, uc);
> +		/*
> +		 * Registration might have partially succeeded, so we can have
> +		 * this consumer being called right at this time. We need to
> +		 * sync here. It's ok, it's unlikely slow path.
> +		 */
> +		uprobe_unregister_sync();
>  		return ERR_PTR(ret);
>  	}
>
> diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
> index 73c570b5988b..6b632710c98e 100644
> --- a/kernel/trace/bpf_trace.c
> +++ b/kernel/trace/bpf_trace.c
> @@ -3184,7 +3184,10 @@ static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
>  	u32 i;
>
>  	for (i = 0; i < cnt; i++)
> -		uprobe_unregister(uprobes[i].uprobe, &uprobes[i].consumer);
> +		uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);
> +
> +	if (cnt)
> +		uprobe_unregister_sync();
>  }
>
>  static void bpf_uprobe_multi_link_release(struct bpf_link *link)
> diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
> index 7eb79e0a5352..f7443e996b1b 100644
> --- a/kernel/trace/trace_uprobe.c
> +++ b/kernel/trace/trace_uprobe.c
> @@ -1097,6 +1097,7 @@ static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
>  static void __probe_event_disable(struct trace_probe *tp)
>  {
>  	struct trace_uprobe *tu;
> +	bool sync = false;
>
>  	tu = container_of(tp, struct trace_uprobe, tp);
>  	WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
> @@ -1105,9 +1106,12 @@ static void __probe_event_disable(struct trace_probe *tp)
>  		if (!tu->uprobe)
>  			continue;
>
> -		uprobe_unregister(tu->uprobe, &tu->consumer);
> +		uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
> +		sync = true;
>  		tu->uprobe = NULL;
>  	}
> +	if (sync)
> +		uprobe_unregister_sync();
>  }
>
>  static int probe_event_enable(struct trace_event_call *call,
> diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
> index 73a6b041bcce..928c73cde32e 100644
> --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
> +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
> @@ -478,7 +478,8 @@ static void testmod_unregister_uprobe(void)
>  	mutex_lock(&testmod_uprobe_mutex);
>
>  	if (uprobe.uprobe) {
> -		uprobe_unregister(uprobe.uprobe, &uprobe.consumer);
> +		uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
> +		uprobe_unregister_sync();
>  		uprobe.offset = 0;
>  		uprobe.uprobe = NULL;
>  	}

-- 
BR
Liao, Chang
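
[Editor's note: for readers following the API split, a minimal sketch of the
caller pattern the patch establishes, modeled on the bpf_trace.c hunk quoted
above. The my_probe struct and my_detach_all() helper are illustrative
assumptions, not part of the patch; only the uprobe_unregister_nosync() and
uprobe_unregister_sync() signatures come from the quoted header change.]

#include <linux/types.h>
#include <linux/uprobes.h>

/* Hypothetical per-consumer bookkeeping; names are illustrative only. */
struct my_probe {
	struct uprobe *uprobe;
	struct uprobe_consumer consumer;
};

static void my_detach_all(struct my_probe *probes, u32 cnt)
{
	u32 i;

	/* Detach each consumer without paying a per-call SRCU wait. */
	for (i = 0; i < cnt; i++)
		uprobe_unregister_nosync(probes[i].uprobe, &probes[i].consumer);

	/*
	 * One synchronize_srcu() for the whole batch: after this returns,
	 * none of the detached consumers can be invoked anymore.
	 */
	if (cnt)
		uprobe_unregister_sync();
}

[An error path that may have partially attached a consumer needs the same
sync before returning to its caller, which is what the uprobe_register()
hunk quoted above does.]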