Quoting Abdiel Janulgue (2019-08-26 13:20:58)
> -static int create_mmap_offset(struct drm_i915_gem_object *obj)
> +static void init_mmap_offset(struct drm_i915_gem_object *obj,
> +			     struct i915_mmap_offset *mmo)
> +{
> +	mutex_lock(&obj->mmo_lock);

This lock should only be guarding obj->mmap_offsets. You don't need to
take it around every kref; just be careful to remove the link on close.

> +	kref_init(&mmo->ref);
> +	list_add(&mmo->offset, &obj->mmap_offsets);
> +	mutex_unlock(&obj->mmo_lock);
> +
> +	mmo->obj = obj;
> +}

> +/* This overcomes the limitation in drm_gem_mmap's assignment of a
> + * drm_gem_object as the vma->vm_private_data. Since we need to
> + * be able to resolve multiple mmap offsets which could be tied
> + * to a single gem object.
> + */
> +int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +	struct drm_vma_offset_node *node;
> +	struct drm_file *priv = filp->private_data;
> +	struct drm_device *dev = priv->minor->dev;
> +	struct i915_mmap_offset *mmo = NULL;
> +	struct drm_gem_object *obj = NULL;
> +
> +	if (drm_dev_is_unplugged(dev))
> +		return -ENODEV;
> +
> +	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
> +	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
> +						  vma->vm_pgoff,
> +						  vma_pages(vma));
> +	if (likely(node)) {
> +		mmo = container_of(node, struct i915_mmap_offset,
> +				   vma_node);
> +		/*
> +		 * Skip 0-refcnted objects as it is in the process of being
> +		 * destroyed and will be invalid when the vma manager lock
> +		 * is released.
> +		 */
> +		obj = &mmo->obj->base;
> +		if (!kref_get_unless_zero(&obj->refcount))
> +			obj = NULL;

Hmm, references are still weird. This doesn't seem like it protects
against

	Thread A			Thread B
	mmap(fd, offset_of_A);
					gem_close(fd, A);

Time for a gem_mmap_gtt/close-race.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx