On Fri, 2011-06-24 at 07:36 +0530, Srikar Dronamraju wrote:
> > 
> > so I am thinking of a solution that includes most of your ideas along
> > with using i_mmap_mutex in mmap_uprobe path.
> > 
> > Addressing Peter's comments given on irc wrt i_mmap_mutex.
> 
> void _unregister_uprobe(...)
> {
> 	if (!del_consumer(...)) { // includes tree removal on last consumer
> 		return;
> 	}
> 	if (uprobe->consumers)
> 		return;
> 
> 	mutex_lock(&mapping->i_mmap_mutex); //sync with mmap.
> 	vma_prio_tree_foreach() {
> 		// create list
> 	}
> 
> 	mutex_unlock(&mapping->i_mmap_mutex);
> 
> 	list_for_each_entry_safe() {
> 		// remove from list
> 		down_read(&mm->mmap_sem);
> 		remove_breakpoint(); // unconditional, if it wasn't there
> 		up_read(&mm->mmap_sem);
> 	}
> 
> 	mutex_lock(&mapping->i_mmap_mutex);
> 	delete_uprobe(uprobe);
> 	mutex_unlock(&mapping->i_mmap_mutex);
> 
> 	inode->uprobes_count--;
> 	mutex_unlock(&inode->i_mutex);

Right, so this lonesome unlock got me puzzled for a while. I always find
it best not to do asymmetric locking like this: keep the lock and the
unlock in the same function.

> }
> 
> int register_uprobe(...)
> {
> 	uprobe = alloc_uprobe(...); // find or insert in tree
> 
> 	mutex_lock(&inode->i_mutex); // sync with register/unregister
> 	if (uprobe->consumers) {
> 		add_consumer();
> 		goto put_unlock;
> 	}
> 	add_consumer();
> 	inode->uprobes_count++;
> 	mutex_lock(&mapping->i_mmap_mutex); //sync with mmap.
> 	vma_prio_tree_foreach(..) {
> 		// get mm ref, add to list blah blah
> 	}
> 
> 	mutex_unlock(&mapping->i_mmap_mutex);
> 	list_for_each_entry_safe() {
> 		if (ret) {
> 			// del from list etc..
> 			//
> 			continue;
> 		}
> 		down_read(mm->mmap_sem);
> 		ret = install_breakpoint();
> 		up_read(..);
> 		// del from list etc..
> 		//
> 		if (ret && (ret == -ESRCH || ret == -EEXIST))
> 			ret = 0;
> 	}
> 
> 	if (ret)
> 		_unregister_uprobe();
> 
> put_unlock:
> 	mutex_unlock(&inode->i_mutex);

You see, now this is a double unlock.

> 	put_uprobe(uprobe);
> 	return ret;
> }
> 
> void unregister_uprobe(...)
> {
> 	mutex_lock(&inode->i_mutex); // sync with register/unregister
> 	uprobe = find_uprobe(); // ref++
> 	_unregister_uprobe();
> 	mutex_unlock(&inode->i_mutex);

Idem.

> 	put_uprobe(uprobe);
> }
> 
> int mmap_uprobe(struct vm_area_struct *vma)
> {
> 	struct list_head tmp_list;
> 	struct uprobe *uprobe, *u;
> 	struct mm_struct *mm;
> 	struct inode *inode;
> 	int ret = 0;
> 
> 	if (!valid_vma(vma))
> 		return ret;	/* Bail-out */
> 
> 	mm = vma->vm_mm;
> 	inode = vma->vm_file->f_mapping->host;
> 	if (inode->uprobes_count)
> 		return ret;
> 	__iget(inode);
> 
> 	INIT_LIST_HEAD(&tmp_list);
> 
> 	mutex_lock(&mapping->i_mmap_mutex);
> 	add_to_temp_list(vma, inode, &tmp_list);
> 	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
> 		loff_t vaddr;
> 
> 		list_del(&uprobe->pending_list);
> 		if (ret)
> 			continue;
> 
> 		vaddr = vma->vm_start + uprobe->offset;
> 		vaddr -= vma->vm_pgoff << PAGE_SHIFT;
> 		ret = install_breakpoint(mm, uprobe, vaddr);

Right, so this is the problem: you cannot do allocations under
i_mmap_mutex; however, I think you can under i_mutex.

> 		if (ret && (ret == -ESRCH || ret == -EEXIST))
> 			ret = 0;
> 	}
> 
> 	mutex_unlock(&mapping->i_mmap_mutex);
> 	iput(inode);
> 	return ret;
> }
> 
> int munmap_uprobe(struct vm_area_struct *vma)
> {
> 	struct list_head tmp_list;
> 	struct uprobe *uprobe, *u;
> 	struct mm_struct *mm;
> 	struct inode *inode;
> 	int ret = 0;
> 
> 	if (!valid_vma(vma))
> 		return ret;	/* Bail-out */
> 
> 	mm = vma->vm_mm;
> 	inode = vma->vm_file->f_mapping->host;
> 	if (inode->uprobes_count)
> 		return ret;

Should that be !inode->uprobes_count?
> 	// walk thro RB tree and decrement mm->uprobes_count
> 	walk_rbtree_and_dec_uprobes_count(); //hold treelock.
> 
> 	return ret;
> }
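
For illustration only, here is a minimal userspace sketch of the
lock/unlock symmetry being asked for above. It is not kernel code:
pthread_mutex_t stands in for inode->i_mutex, and every name in it
(fake_inode, __unregister_thing, register_thing, unregister_thing) is
made up for the example. The point is the shape: the double-underscore
helper assumes the caller already holds the mutex and never unlocks it,
while the entry points that take the mutex also release it on every
path, so there is neither a lonesome unlock nor a double unlock.

/*
 * Userspace sketch of symmetric locking; pthread_mutex_t stands in
 * for inode->i_mutex. All names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_inode {
	pthread_mutex_t i_mutex;	/* stand-in for inode->i_mutex */
	int uprobes_count;		/* stand-in for inode->uprobes_count */
};

/* Caller must hold inode->i_mutex; this helper never unlocks it. */
static void __unregister_thing(struct fake_inode *inode)
{
	/* tree removal, breakpoint removal, etc. would go here */
	inode->uprobes_count--;
}

/* Lock and unlock live in the same function, covering every return path. */
static int register_thing(struct fake_inode *inode, int fail)
{
	int ret = 0;

	pthread_mutex_lock(&inode->i_mutex);
	inode->uprobes_count++;

	/* pretend breakpoint installation happened here */
	if (fail)
		ret = -1;

	if (ret)
		__unregister_thing(inode);	/* cleanup runs under the lock */

	pthread_mutex_unlock(&inode->i_mutex);	/* the one and only unlock */
	return ret;
}

static void unregister_thing(struct fake_inode *inode)
{
	pthread_mutex_lock(&inode->i_mutex);
	__unregister_thing(inode);
	pthread_mutex_unlock(&inode->i_mutex);
}

int main(void)
{
	struct fake_inode inode = {
		.i_mutex = PTHREAD_MUTEX_INITIALIZER,
		.uprobes_count = 0,
	};

	if (register_thing(&inode, 0) == 0)
		unregister_thing(&inode);

	register_thing(&inode, 1);	/* failure path cleans up under the same lock */

	printf("uprobes_count = %d\n", inode.uprobes_count);	/* prints 0 */
	return 0;
}

Built with something like "gcc -pthread sketch.c", this prints
uprobes_count = 0; the error path in register_thing() reuses the helper
without ever touching the mutex itself, which is the split I would like
_unregister_uprobe()/register_uprobe()/unregister_uprobe() to follow.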