Hi

On Thu, Oct 15, 2015 at 9:36 AM, Daniel Vetter <daniel.vetter@xxxxxxxx> wrote:
> Compared to wrapping the final kref_put with dev->struct_mutex this
> allows us to only acquire the offset manager look both in the final
> cleanup and in the lookup. Which has the upside that no locks leak out
> of the core abstractions.
>
> Also, this is the final bit which required dev->struct_mutex in gem
> core, now modern drivers can be completely struct_mutex free!
>
> This needs a new drm_vma_offset_exact_lookup_locked and makes both
> drm_vma_offset_exact_lookup and drm_vma_offset_lookup unused.
>
> Also extract __drm_gem_mmap_obj which is just drm_gem_mmap_obj without
> the obj_reference call to share code.
>
> Signed-off-by: Daniel Vetter <daniel.vetter@xxxxxxxxx>
> ---
>  drivers/gpu/drm/drm_gem.c         | 69 +++++++++++++++++++++++----------------
>  drivers/gpu/drm/drm_vma_manager.c | 40 +++++++----------------
>  include/drm/drm_vma_manager.h     | 22 ++++---------
>  3 files changed, 58 insertions(+), 73 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
> index ab8ea42264f4..795d64fa7cb9 100644
> --- a/drivers/gpu/drm/drm_gem.c
> +++ b/drivers/gpu/drm/drm_gem.c
> @@ -790,6 +790,27 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
>  }
>  EXPORT_SYMBOL(drm_gem_vm_close);
>
> +static int __drm_gem_mmap_obj(struct drm_gem_object *obj,
> +                              unsigned long obj_size,
> +                              struct vm_area_struct *vma)
> +{
> +        struct drm_device *dev = obj->dev;
> +
> +        /* Check for valid size. */
> +        if (obj_size < vma->vm_end - vma->vm_start)
> +                return -EINVAL;
> +
> +        if (!dev->driver->gem_vm_ops)
> +                return -EINVAL;
> +
> +        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
> +        vma->vm_ops = dev->driver->gem_vm_ops;
> +        vma->vm_private_data = obj;

Slightly ugly that this function *consumes* the ref-count of 'obj', but
only in the success case. Which, btw., makes your function below leak
the ref-count.

I'd prefer if you moved the assignment of "vm_private_data" into the
caller, which makes the ref-counting obvious (rough sketch below the
quoted hunk).

> +        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
> +
> +        return 0;
> +}
> +
>  /**
>   * drm_gem_mmap_obj - memory map a GEM object
>   * @obj: the GEM object to map
> @@ -817,19 +838,11 @@ EXPORT_SYMBOL(drm_gem_vm_close);
>  int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
>                       struct vm_area_struct *vma)
>  {
> -        struct drm_device *dev = obj->dev;
> -
> -        /* Check for valid size. */
> -        if (obj_size < vma->vm_end - vma->vm_start)
> -                return -EINVAL;
> -
> -        if (!dev->driver->gem_vm_ops)
> -                return -EINVAL;
> +        int ret;
>
> -        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
> -        vma->vm_ops = dev->driver->gem_vm_ops;
> -        vma->vm_private_data = obj;
> -        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
> +        ret = __drm_gem_mmap_obj(obj, obj_size, vma);
> +        if (ret)
> +                return ret;
>
>          /* Take a ref for this mapping of the object, so that the fault
>           * handler can dereference the mmap offset's pointer to the object.
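To illustrate what I mean, here is a rough, untested sketch of the split
with the helper kept reference-neutral; the caller that takes the
reference is also the one handing it to the vma:

static int __drm_gem_mmap_obj(struct drm_gem_object *obj,
                              unsigned long obj_size,
                              struct vm_area_struct *vma)
{
        struct drm_device *dev = obj->dev;

        /* Check for valid size. */
        if (obj_size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!dev->driver->gem_vm_ops)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_ops = dev->driver->gem_vm_ops;
        vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

        /* No vm_private_data here, the helper neither takes nor consumes a ref. */
        return 0;
}

int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
                     struct vm_area_struct *vma)
{
        int ret;

        ret = __drm_gem_mmap_obj(obj, obj_size, vma);
        if (ret)
                return ret;

        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object,
         * and hand that reference to the vma here in the caller.
         */
        drm_gem_object_reference(obj);
        vma->vm_private_data = obj;

        return 0;
}

That way whoever sets vm_private_data is also the one providing the
reference the mapping consumes, and the failure paths stay obvious.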
> @@ -862,31 +875,29 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
>  {
>          struct drm_file *priv = filp->private_data;
>          struct drm_device *dev = priv->minor->dev;
> -        struct drm_gem_object *obj;
> +        struct drm_gem_object *obj = NULL;
>          struct drm_vma_offset_node *node;
> -        int ret;
>
>          if (drm_device_is_unplugged(dev))
>                  return -ENODEV;
>
> -        mutex_lock(&dev->struct_mutex);
> +        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
> +        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
> +                                                  vma->vm_pgoff,
> +                                                  vma_pages(vma));
> +        if (likely(node)) {
> +                obj = container_of(node, struct drm_gem_object, vma_node);
> +                if (!kref_get_unless_zero(&obj->refcount))
> +                        obj = NULL;
> +        }
> +        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

Looks good!

>
> -        node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
> -                                           vma->vm_pgoff,
> -                                           vma_pages(vma));
> -        if (!node) {
> -                mutex_unlock(&dev->struct_mutex);
> +        if (!obj)
>                  return -EINVAL;
> -        } else if (!drm_vma_node_is_allowed(node, filp)) {
> -                mutex_unlock(&dev->struct_mutex);
> +        else if (!drm_vma_node_is_allowed(node, filp))
>                  return -EACCES;

You need to drop the 'obj' reference here.

> -        }
> -
> -        obj = container_of(node, struct drm_gem_object, vma_node);
> -        ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
>
> -        mutex_unlock(&dev->struct_mutex);
> -
> -        return ret;
> +        return __drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
> +                                  vma);

If this fails, you need to drop the 'obj' reference here as well (a
fixed-up sketch of both error paths follows below the quoted hunk).

Thanks
David

>  }
>  EXPORT_SYMBOL(drm_gem_mmap);
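For the two error paths above, something like the following is what I
have in mind. Again just an untested sketch, and it keeps
__drm_gem_mmap_obj as in your patch (i.e. it still sets vm_private_data,
so on success the mapping consumes the reference taken in the lookup):

int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_gem_object *obj = NULL;
        struct drm_vma_offset_node *node;
        int ret;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        drm_vma_offset_lock_lookup(dev->vma_offset_manager);
        node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
                                                  vma->vm_pgoff,
                                                  vma_pages(vma));
        if (likely(node)) {
                obj = container_of(node, struct drm_gem_object, vma_node);
                /* Weak reference: fails if the object is already on its way out. */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

        if (!obj)
                return -EINVAL;

        if (!drm_vma_node_is_allowed(node, filp)) {
                /* Drop the reference taken during the lookup. */
                drm_gem_object_unreference_unlocked(obj);
                return -EACCES;
        }

        ret = __drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                                 vma);
        if (ret)
                /* Mapping setup failed, so the vma never took ownership. */
                drm_gem_object_unreference_unlocked(obj);

        return ret;
}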
> diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
> index 68c1f32fb086..2f2ecde8285b 100644
> --- a/drivers/gpu/drm/drm_vma_manager.c
> +++ b/drivers/gpu/drm/drm_vma_manager.c
> @@ -112,7 +112,7 @@ void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
>  EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
>
>  /**
> - * drm_vma_offset_lookup() - Find node in offset space
> + * drm_vma_offset_lookup_locked() - Find node in offset space
>   * @mgr: Manager object
>   * @start: Start address for object (page-based)
>   * @pages: Size of object (page-based)
> @@ -122,37 +122,21 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
>   * region and the given node will be returned, as long as the node spans the
>   * whole requested area (given the size in number of pages as @pages).
>   *
> - * RETURNS:
> - * Returns NULL if no suitable node can be found. Otherwise, the best match
> - * is returned. It's the caller's responsibility to make sure the node doesn't
> - * get destroyed before the caller can access it.
> - */
> -struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
> -                                                  unsigned long start,
> -                                                  unsigned long pages)
> -{
> -        struct drm_vma_offset_node *node;
> -
> -        read_lock(&mgr->vm_lock);
> -        node = drm_vma_offset_lookup_locked(mgr, start, pages);
> -        read_unlock(&mgr->vm_lock);
> -
> -        return node;
> -}
> -EXPORT_SYMBOL(drm_vma_offset_lookup);
> -
> -/**
> - * drm_vma_offset_lookup_locked() - Find node in offset space
> - * @mgr: Manager object
> - * @start: Start address for object (page-based)
> - * @pages: Size of object (page-based)
> + * Note that before lookup the vma offset manager lookup lock must be acquired
> + * with drm_vma_offset_lock_lookup(). See there for an example. This can then be
> + * used to implement weakly referenced lookups using kref_get_unless_zero().
>   *
> - * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
> - * manually. See drm_vma_offset_lock_lookup() for an example.
> + * Example:
> + *     drm_vma_offset_lock_lookup(mgr);
> + *     node = drm_vma_offset_lookup_locked(mgr);
> + *     if (node)
> + *         kref_get_unless_zero(container_of(node, sth, entr));
> + *     drm_vma_offset_unlock_lookup(mgr);
>   *
>   * RETURNS:
>   * Returns NULL if no suitable node can be found. Otherwise, the best match
> - * is returned.
> + * is returned. It's the caller's responsibility to make sure the node doesn't
> + * get destroyed before the caller can access it.
>   */
>  struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
>                                                           unsigned long start,
> diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
> index 74f3b38c43c1..2f63dd5e05eb 100644
> --- a/include/drm/drm_vma_manager.h
> +++ b/include/drm/drm_vma_manager.h
> @@ -54,9 +54,6 @@ void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
>                           unsigned long page_offset, unsigned long size);
>  void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
>
> -struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
> -                                                  unsigned long start,
> -                                                  unsigned long pages);
>  struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
>                                                           unsigned long start,
>                                                           unsigned long pages);
> @@ -71,25 +68,25 @@ bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
>                               struct file *filp);
>
>  /**
> - * drm_vma_offset_exact_lookup() - Look up node by exact address
> + * drm_vma_offset_exact_lookup_locked() - Look up node by exact address
>   * @mgr: Manager object
>   * @start: Start address (page-based, not byte-based)
>   * @pages: Size of object (page-based)
>   *
> - * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
> + * Same as drm_vma_offset_lookup_locked() but does not allow any offset into the node.
>   * It only returns the exact object with the given start address.
>   *
>   * RETURNS:
>   * Node at exact start address @start.
>   */
>  static inline struct drm_vma_offset_node *
> -drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
> -                            unsigned long start,
> -                            unsigned long pages)
> +drm_vma_offset_exact_lookup_locked(struct drm_vma_offset_manager *mgr,
> +                                   unsigned long start,
> +                                   unsigned long pages)
>  {
>          struct drm_vma_offset_node *node;
>
> -        node = drm_vma_offset_lookup(mgr, start, pages);
> +        node = drm_vma_offset_lookup_locked(mgr, start, pages);
>          return (node && node->vm_node.start == start) ? node : NULL;
>  }
>
> @@ -108,13 +105,6 @@ drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
>   * not call any other VMA helpers while holding this lock.
>   *
>   * Note: You're in atomic-context while holding this lock!
> - *
> - * Example:
> - *     drm_vma_offset_lock_lookup(mgr);
> - *     node = drm_vma_offset_lookup_locked(mgr);
> - *     if (node)
> - *         kref_get_unless_zero(container_of(node, sth, entr));
> - *     drm_vma_offset_unlock_lookup(mgr);
>   */
>  static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
>  {
> --
> 2.5.1
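P.S.: For drivers wanting to copy the pattern the new kerneldoc
describes, here is the abbreviated example from the comment spelled out
a bit more completely. Purely illustrative and untested; the object type
and helper name are made up:

struct foo_obj {
        struct kref refcount;
        struct drm_vma_offset_node vma_node;
        /* ... driver-private state ... */
};

/* Returns a strong reference on success, NULL if no live object matches. */
static struct foo_obj *foo_obj_lookup(struct drm_vma_offset_manager *mgr,
                                      unsigned long start, unsigned long pages)
{
        struct drm_vma_offset_node *node;
        struct foo_obj *obj = NULL;

        /* The lookup lock keeps the node in the tree, it does not pin the object. */
        drm_vma_offset_lock_lookup(mgr);
        node = drm_vma_offset_exact_lookup_locked(mgr, start, pages);
        if (node) {
                obj = container_of(node, struct foo_obj, vma_node);
                /* Weak-to-strong conversion, fails once the final unref has started. */
                if (!kref_get_unless_zero(&obj->refcount))
                        obj = NULL;
        }
        drm_vma_offset_unlock_lookup(mgr);

        return obj;
}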