If we cannot handle a vma within the unbind loop, try to flush the
pending events (i915_vma_parked, i915_vm_release) and try again. This
avoids a round trip to userspace that is not guaranteed to make forward
progress, as the events we wait upon require being idle.

References: cb6c3d45f948 ("drm/i915/gem: Avoid parking the vma as we unbind")
Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Matthew Auld <matthew.auld@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index eb9e2609c569..377487a0762e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -123,7 +123,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	LIST_HEAD(still_in_list);
 	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
-	int ret = 0;
+	int ret;
 
 	if (!atomic_read(&obj->bind_count))
 		return 0;
@@ -136,6 +136,8 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	 */
 	wakeref = intel_runtime_pm_get(rpm);
 
+try_again:
+	ret = 0;
 	spin_lock(&obj->vma.lock);
 	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
 						       struct i915_vma,
@@ -153,6 +155,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 		} else {
 			if (i915_vma_is_closed(vma)) {
 				spin_unlock(&obj->vma.lock);
+				i915_vma_parked(vm->gt);
 				goto err_vm;
 			}
 		}
@@ -174,6 +177,11 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 	list_splice(&still_in_list, &obj->vma.list);
 	spin_unlock(&obj->vma.lock);
 
+	if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_ACTIVE) {
+		rcu_barrier(); /* flush the i915_vm_release() */
+		goto try_again;
+	}
+
 	intel_runtime_pm_put(rpm, wakeref);
 
 	return ret;
-- 
2.24.0
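
For readers skimming the diff, the control flow it adds reduces to the
standalone C sketch below. try_unbind_all(), flush_deferred_releases()
and UNBIND_ACTIVE are hypothetical stand-ins for the i915 internals, not
real API; only the shape matches the patch: on -EAGAIN, flush the
pending deferred work (the role rcu_barrier() plays for
i915_vm_release()) and retry inside the kernel rather than returning the
transient error to userspace.

/*
 * Minimal userspace sketch of the flush-and-retry pattern; all names
 * here are hypothetical stand-ins, not i915 API.
 */
#include <errno.h>
#include <stdio.h>

#define UNBIND_ACTIVE (1 << 0)	/* hypothetical: caller accepts waiting */

static int pending_releases = 1;	/* deferred frees not yet run */

/* Fails with -EAGAIN while a deferred release is still outstanding. */
static int try_unbind_all(void)
{
	return pending_releases ? -EAGAIN : 0;
}

/* Stand-in for rcu_barrier(): wait for queued release callbacks. */
static void flush_deferred_releases(void)
{
	pending_releases = 0;
}

static int unbind_with_retry(unsigned int flags)
{
	int ret;

try_again:
	ret = try_unbind_all();
	if (ret == -EAGAIN && (flags & UNBIND_ACTIVE)) {
		flush_deferred_releases();	/* must make forward progress */
		goto try_again;
	}
	return ret;
}

int main(void)
{
	printf("unbind_with_retry() = %d\n", unbind_with_retry(UNBIND_ACTIVE));
	return 0;
}

Note that, as in the patch, the retry is unbounded: it is only safe
because each flush is assumed to make forward progress (rcu_barrier()
waits for all queued callbacks to complete), so repeated passes cannot
spin forever on the same pending events.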