On Wed, Jul 31, 2024 at 02:42:50PM -0700, Andrii Nakryiko wrote:

SNIP

>  static void put_uprobe(struct uprobe *uprobe)
>  {
> -        if (refcount_dec_and_test(&uprobe->ref)) {
> -                /*
> -                 * If application munmap(exec_vma) before uprobe_unregister()
> -                 * gets called, we don't get a chance to remove uprobe from
> -                 * delayed_uprobe_list from remove_breakpoint(). Do it here.
> -                 */
> -                mutex_lock(&delayed_uprobe_lock);
> -                delayed_uprobe_remove(uprobe, NULL);
> -                mutex_unlock(&delayed_uprobe_lock);
> -                kfree(uprobe);
> -        }
> +        if (!refcount_dec_and_test(&uprobe->ref))
> +                return;
> +
> +        write_lock(&uprobes_treelock);
> +
> +        if (uprobe_is_active(uprobe))
> +                rb_erase(&uprobe->rb_node, &uprobes_tree);
> +
> +        write_unlock(&uprobes_treelock);
> +
> +        /*
> +         * If application munmap(exec_vma) before uprobe_unregister()
> +         * gets called, we don't get a chance to remove uprobe from
> +         * delayed_uprobe_list from remove_breakpoint(). Do it here.
> +         */
> +        mutex_lock(&delayed_uprobe_lock);
> +        delayed_uprobe_remove(uprobe, NULL);
> +        mutex_unlock(&delayed_uprobe_lock);

we should do kfree(uprobe) in here, right?

I think this is fixed later on when uprobe_free_rcu is introduced
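
(for the record, I'd expect that to end up as the usual call_rcu() pattern..
below is just my sketch, it assumes struct uprobe grows a 'struct rcu_head rcu'
member, the names are my guess and not taken from this series:

        static void uprobe_free_rcu(struct rcu_head *rcu)
        {
                /* sketch only: assumes struct uprobe embeds 'struct rcu_head rcu' */
                struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);

                kfree(uprobe);
        }

with put_uprobe() then doing call_rcu(&uprobe->rcu, uprobe_free_rcu) instead of
freeing the uprobe directly)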

SNIP

> @@ -1159,27 +1180,16 @@ struct uprobe *uprobe_register(struct inode *inode,
>          if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
>                  return ERR_PTR(-EINVAL);
> 
> - retry:
>          uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
>          if (IS_ERR(uprobe))
>                  return uprobe;
> 
> -        /*
> -         * We can race with uprobe_unregister()->delete_uprobe().
> -         * Check uprobe_is_active() and retry if it is false.
> -         */
>          down_write(&uprobe->register_rwsem);
> -        ret = -EAGAIN;
> -        if (likely(uprobe_is_active(uprobe))) {
> -                consumer_add(uprobe, uc);
> -                ret = register_for_each_vma(uprobe, uc);
> -        }
> +        consumer_add(uprobe, uc);
> +        ret = register_for_each_vma(uprobe, uc);
>          up_write(&uprobe->register_rwsem);
> -        put_uprobe(uprobe);
> 
>          if (ret) {
> -                if (unlikely(ret == -EAGAIN))
> -                        goto retry;

nice, I like getting rid of this..

so far lgtm ;-)

jirka

>                  uprobe_unregister(uprobe, uc);
>                  return ERR_PTR(ret);
>          }
> @@ -1286,15 +1296,19 @@ static void build_probe_list(struct inode *inode,
>                          u = rb_entry(t, struct uprobe, rb_node);
>                          if (u->inode != inode || u->offset < min)
>                                  break;
> +                        u = try_get_uprobe(u);
> +                        if (!u) /* uprobe already went away, safe to ignore */
> +                                continue;
>                          list_add(&u->pending_list, head);
> -                        get_uprobe(u);
>                  }
>                  for (t = n; (t = rb_next(t)); ) {
>                          u = rb_entry(t, struct uprobe, rb_node);
>                          if (u->inode != inode || u->offset > max)
>                                  break;
> +                        u = try_get_uprobe(u);
> +                        if (!u) /* uprobe already went away, safe to ignore */
> +                                continue;
>                          list_add(&u->pending_list, head);
> -                        get_uprobe(u);
>                  }
>          }
>          read_unlock(&uprobes_treelock);
> @@ -1752,6 +1766,12 @@ static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
>                          return -ENOMEM;
> 
>                  *n = *o;
> +                /*
> +                 * uprobe's refcnt has to be positive at this point, kept by
> +                 * utask->return_instances items; return_instances can't be
> +                 * removed right now, as task is blocked due to duping; so
> +                 * get_uprobe() is safe to use here.
> +                 */
>                  get_uprobe(n->uprobe);
>                  n->next = NULL;
> 
> @@ -1894,7 +1914,10 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
>                  }
>                  orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
>          }
> -
> +        /*
> +         * uprobe's refcnt is positive, held by caller, so it's safe to
> +         * unconditionally bump it one more time here
> +         */
>          ri->uprobe = get_uprobe(uprobe);
>          ri->func = instruction_pointer(regs);
>          ri->stack = user_stack_pointer(regs);
> --
> 2.43.0
> 
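
PS: I take it try_get_uprobe() above is the usual refcount_inc_not_zero()
pattern, so the treelock'ed walkers simply skip uprobes whose refcount already
dropped to zero.. roughly like this (my sketch, not copied from the patch):

        static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
        {
                /* take a reference only if the uprobe isn't already on its way out */
                if (refcount_inc_not_zero(&uprobe->ref))
                        return uprobe;
                return NULL;
        }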