> -static int match_uprobe(struct uprobe *l, struct uprobe *r) > +static int match_uprobe(struct uprobe *l, struct uprobe *r, int *match_inode) > { > + /* > + * if match_inode is non NULL then indicate if the > + * inode atleast match. > + */ > + if (match_inode) > + *match_inode = 0; > + > if (l->inode < r->inode) > return -1; > if (l->inode > r->inode) > return 1; > else { > + if (match_inode) > + *match_inode = 1; > + > if (l->offset < r->offset) > return -1; > > @@ -75,16 +86,20 @@ static int match_uprobe(struct uprobe *l, struct uprobe *r) > return 0; > } > > -static struct uprobe *__find_uprobe(struct inode * inode, loff_t offset) > +static struct uprobe *__find_uprobe(struct inode * inode, loff_t offset, > + struct rb_node **close_match) > { > struct uprobe u = { .inode = inode, .offset = offset }; > struct rb_node *n = uprobes_tree.rb_node; > struct uprobe *uprobe; > - int match; > + int match, match_inode; > > while (n) { > uprobe = rb_entry(n, struct uprobe, rb_node); > - match = match_uprobe(&u, uprobe); > + match = match_uprobe(&u, uprobe, &match_inode); > + if (close_match && match_inode) > + *close_match = n; Because: if (close_match && uprobe->inode == inode) Isn't good enough? Also, returning an rb_node just seems iffy.. > if (!match) { > atomic_inc(&uprobe->ref); > return uprobe; Why not something like: +static struct uprobe *__find_uprobe(struct inode * inode, loff_t offset, bool inode_only) +{ struct uprobe u = { .inode = inode, .offset = inode_only ? 
0 : offset }; + struct rb_node *n = uprobes_tree.rb_node; + struct uprobe *uprobe; struct uprobe *ret = NULL; + int match; + + while (n) { + uprobe = rb_entry(n, struct uprobe, rb_node); + match = match_uprobe(&u, uprobe); + if (!match) { if (!inode_only) atomic_inc(&uprobe->ref); + return uprobe; + } if (inode_only && uprobe->inode == inode) ret = uprobe; + if (match < 0) + n = n->rb_left; + else + n = n->rb_right; + + } return ret; +} > +/* > + * For a given inode, build a list of probes that need to be inserted. > + */ > +static void build_probe_list(struct inode *inode, struct list_head *head) > +{ > + struct uprobe *uprobe; > + struct rb_node *n; > + unsigned long flags; > + > + n = uprobes_tree.rb_node; > + spin_lock_irqsave(&uprobes_treelock, flags); > + uprobe = __find_uprobe(inode, 0, &n); > + /* > + * If indeed there is a probe for the inode and with offset zero, > + * then lets release its reference. (ref got thro __find_uprobe) > + */ > + if (uprobe) > + put_uprobe(uprobe); The above would make this ^ unneeded. n = &uprobe->rb_node; > + for (; n; n = rb_next(n)) { > + uprobe = rb_entry(n, struct uprobe, rb_node); > + if (uprobe->inode != inode) > + break; > + list_add(&uprobe->pending_list, head); > + atomic_inc(&uprobe->ref); > + } > + spin_unlock_irqrestore(&uprobes_treelock, flags); > +} If this ever gets to be a latency issue (linear lookup under spinlock) you can use a double lock (mutex+spinlock) and require that modification acquires both but lookups can get away with either. That way you can do the linear search using a mutex instead of the spinlock. > + > +/* > + * Called from mmap_region. > + * called with mm->mmap_sem acquired. > + * > + * Return -ve no if we fail to insert probes and we cannot > + * bail-out. > + * Return 0 otherwise. i.e : > + * - successful insertion of probes > + * - (or) no possible probes to be inserted. > + * - (or) insertion of probes failed but we can bail-out. 
> + */ > +int mmap_uprobe(struct vm_area_struct *vma) > +{ > + struct list_head tmp_list; > + struct uprobe *uprobe, *u; > + struct inode *inode; > + int ret = 0; > + > + if (!valid_vma(vma)) > + return ret; /* Bail-out */ > + > + inode = igrab(vma->vm_file->f_mapping->host); > + if (!inode) > + return ret; > + > + INIT_LIST_HEAD(&tmp_list); > + mutex_lock(&uprobes_mmap_mutex); > + build_probe_list(inode, &tmp_list); > + list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { > + loff_t vaddr; > + > + list_del(&uprobe->pending_list); > + if (!ret && uprobe->consumers) { > + vaddr = vma->vm_start + uprobe->offset; > + vaddr -= vma->vm_pgoff << PAGE_SHIFT; > + if (vaddr < vma->vm_start || vaddr >= vma->vm_end) > + continue; > + ret = install_breakpoint(vma->vm_mm, uprobe); > + > + if (ret && (ret == -ESRCH || ret == -EEXIST)) > + ret = 0; > + } > + put_uprobe(uprobe); > + } > + > + mutex_unlock(&uprobes_mmap_mutex); > + iput(inode); > + return ret; > +} > + > +static void dec_mm_uprobes_count(struct vm_area_struct *vma, > + struct inode *inode) > +{ > + struct uprobe *uprobe; > + struct rb_node *n; > + unsigned long flags; > + > + n = uprobes_tree.rb_node; > + spin_lock_irqsave(&uprobes_treelock, flags); > + uprobe = __find_uprobe(inode, 0, &n); > + > + /* > + * If indeed there is a probe for the inode and with offset zero, > + * then lets release its reference. (ref got thro __find_uprobe) > + */ > + if (uprobe) > + put_uprobe(uprobe); > + for (; n; n = rb_next(n)) { > + loff_t vaddr; > + > + uprobe = rb_entry(n, struct uprobe, rb_node); > + if (uprobe->inode != inode) > + break; > + vaddr = vma->vm_start + uprobe->offset; > + vaddr -= vma->vm_pgoff << PAGE_SHIFT; > + if (vaddr < vma->vm_start || vaddr >= vma->vm_end) > + continue; > + atomic_dec(&vma->vm_mm->mm_uprobes_count); > + } > + spin_unlock_irqrestore(&uprobes_treelock, flags); > +} > + > +/* > + * Called in context of a munmap of a vma. 
> + */ > +void munmap_uprobe(struct vm_area_struct *vma) > +{ > + struct inode *inode; > + > + if (!valid_vma(vma)) > + return; /* Bail-out */ > + > + if (!atomic_read(&vma->vm_mm->mm_uprobes_count)) > + return; > + > + inode = igrab(vma->vm_file->f_mapping->host); > + if (!inode) > + return; > + > + dec_mm_uprobes_count(vma, inode); > + iput(inode); > + return; > +} One has to wonder why mmap_uprobe() can be one function but munmap_uprobe() cannot. -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Fight unfair telecom internet charges in Canada: sign http://stopthemeter.ca/