Now that i915_vma_parked() takes the object lock on vma destruction,
and the only user of the vma refcount, i915_gem_object_unbind(), also
takes the object lock, remove the vma refcount.

Signed-off-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem.c       | 17 +++++++++++++----
 drivers/gpu/drm/i915/i915_vma.c       | 14 +++-----------
 drivers/gpu/drm/i915/i915_vma.h       | 14 --------------
 drivers/gpu/drm/i915/i915_vma_types.h |  1 -
 4 files changed, 16 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dd84ebabb50f..c26110abcc0b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -151,14 +151,25 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 			break;
 		}
 
+		/*
+		 * Requiring the vm destructor to take the object lock
+		 * before destroying a vma would help us eliminate the
+		 * i915_vm_tryget() here, AND thus also the barrier stuff
+		 * at the end. That's an easy fix, but sleeping locks in
+		 * a kthread should generally be avoided.
+		 */
 		ret = -EAGAIN;
 		if (!i915_vm_tryget(vma->vm))
 			break;
 
-		/* Prevent vma being freed by i915_vma_parked as we unbind */
-		vma = __i915_vma_get(vma);
 		spin_unlock(&obj->vma.lock);
 
+		/*
+		 * Since i915_vma_parked() takes the object lock
+		 * before vma destruction, it won't race us here,
+		 * and destroy the vma from under us.
+		 */
+
 		if (vma) {
 			bool vm_trylock = !!(flags & I915_GEM_OBJECT_UNBIND_VM_TRYLOCK);
 			ret = -EBUSY;
@@ -180,8 +191,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 					ret = i915_vma_unbind(vma);
 				}
 			}
-
-			__i915_vma_put(vma);
 		}
 
 		i915_vm_put(vma->vm);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 91538bc38110..6fd25b39748f 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -122,7 +122,6 @@ vma_create(struct drm_i915_gem_object *obj,
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	kref_init(&vma->ref);
 	vma->ops = &vm->vma_ops;
 	vma->obj = obj;
 	vma->size = obj->base.size;
@@ -1628,15 +1627,6 @@ void i915_vma_reopen(struct i915_vma *vma)
 		__i915_vma_remove_closed(vma);
 }
 
-void i915_vma_release(struct kref *ref)
-{
-	struct i915_vma *vma = container_of(ref, typeof(*vma), ref);
-
-	i915_active_fini(&vma->active);
-	GEM_WARN_ON(vma->resource);
-	i915_vma_free(vma);
-}
-
 static void force_unbind(struct i915_vma *vma)
 {
 	if (!drm_mm_node_allocated(&vma->node))
@@ -1665,7 +1655,9 @@ static void release_references(struct i915_vma *vma, bool vm_ddestroy)
 	if (vm_ddestroy)
 		i915_vm_resv_put(vma->vm);
 
-	__i915_vma_put(vma);
+	i915_active_fini(&vma->active);
+	GEM_WARN_ON(vma->resource);
+	i915_vma_free(vma);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 67ae7341c7e0..6034991d89fe 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -222,20 +222,6 @@ void i915_vma_unlink_ctx(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
 void i915_vma_reopen(struct i915_vma *vma);
 
-static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma)
-{
-	if (kref_get_unless_zero(&vma->ref))
-		return vma;
-
-	return NULL;
-}
-
-void i915_vma_release(struct kref *ref);
-static inline void __i915_vma_put(struct i915_vma *vma)
-{
-	kref_put(&vma->ref, i915_vma_release);
-}
-
 void i915_vma_destroy_locked(struct i915_vma *vma);
 void i915_vma_destroy(struct i915_vma *vma);
 
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index eac36be184e5..be6e028c3b57 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -211,7 +211,6 @@ struct i915_vma {
 	 * handles (but same file) for execbuf, i.e. the number of aliases
 	 * that exist in the ctx->handle_vmas LUT for this vma.
 	 */
-	struct kref ref;
 	atomic_t open_count;
 	atomic_t flags;
 	/**
-- 
2.34.1
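
Not part of the patch, just an illustration: below is a minimal stand-alone
sketch (plain user-space C, not i915 code, all names illustrative) of the
locking pattern the commit message relies on. Instead of holding a per-vma
refcount across unbind, both the unbind path and the destruction path take
the owning object's lock, so the vma cannot be freed while unbind looks at it.

/*
 * Stand-alone sketch (not i915 code, all names illustrative) of the
 * pattern above: the owning object's lock, rather than a vma refcount,
 * serializes unbind against vma destruction.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct obj;

struct vma {
	struct obj *obj;
	int bound;
};

struct obj {
	pthread_mutex_t lock;	/* stands in for the GEM object lock */
	struct vma *vma;	/* a single vma, for simplicity */
};

/* Destruction path, analogous to i915_vma_parked() freeing a vma. */
static void obj_destroy_vma(struct obj *obj)
{
	pthread_mutex_lock(&obj->lock);
	free(obj->vma);
	obj->vma = NULL;
	pthread_mutex_unlock(&obj->lock);
}

/* Unbind path, analogous to i915_gem_object_unbind(). */
static int obj_unbind(struct obj *obj)
{
	int ret = -1;

	pthread_mutex_lock(&obj->lock);
	/*
	 * Holding obj->lock is what keeps obj->vma from being freed
	 * under us; no reference count on the vma itself is needed.
	 */
	if (obj->vma) {
		obj->vma->bound = 0;
		ret = 0;
	}
	pthread_mutex_unlock(&obj->lock);

	return ret;
}

int main(void)
{
	struct obj obj = { .lock = PTHREAD_MUTEX_INITIALIZER };

	obj.vma = calloc(1, sizeof(*obj.vma));
	obj.vma->obj = &obj;
	obj.vma->bound = 1;

	printf("unbind: %d\n", obj_unbind(&obj));	/* 0: vma still present */
	obj_destroy_vma(&obj);
	printf("unbind: %d\n", obj_unbind(&obj));	/* -1: vma already gone */

	return 0;
}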