Currently we guarantee that vmas stay alive when the object lock is held only if we also hold a private vm reference. In order to relax the latter requirement, take the object lock also when destroying vmas from the vm destruction path. Signed-off-by: Thomas Hellström <thomas.hellstrom@xxxxxxxxxxxxxxx> --- drivers/gpu/drm/i915/gt/intel_gtt.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index b67831833c9a..92e22f727f88 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -112,7 +112,10 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object static void clear_vm_list(struct list_head *list) { struct i915_vma *vma, *vn; + bool unlocked; +restart: + unlocked = false; list_for_each_entry_safe(vma, vn, list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; @@ -138,8 +141,22 @@ static void clear_vm_list(struct list_head *list) i915_vm_resv_get(vma->vm); vma->vm_ddestroy = true; } else { + if (!i915_gem_object_trylock(obj, NULL)) { + unlocked = true; + mutex_unlock(&vma->vm->mutex); + i915_gem_object_lock(obj, NULL); + mutex_lock(&vma->vm->mutex); + /* + * The vma may now be on a different list, + * but not destroyed. We don't care, + * destroy it anyway. + */ + } i915_vma_destroy_locked(vma); + i915_gem_object_unlock(obj); i915_gem_object_put(obj); + if (unlocked) + goto restart; } } -- 2.36.1