Instead of force unbinding and rebinding every time, check whether the
notifier seqcount is still valid while the pages are bound. This way we
only rebind the userptr when we need to, and avoid stalls.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 29 ++++++++++++++++++---
 1 file changed, 26 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 327f01b04f21..dad68de2159f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -268,12 +268,35 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
 	if (ret)
 		return ret;
 
-	/* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
-	ret = i915_gem_object_userptr_unbind(obj, false);
+	/* optimistically try to preserve current pages while unlocked */
+	if (i915_gem_object_has_pages(obj) &&
+	    !mmu_interval_check_retry(&obj->userptr.notifier,
+				      obj->userptr.notifier_seq)) {
+		ret = mutex_lock_interruptible(&i915->mm.notifier_lock);
+		if (!ret) {
+			if (obj->userptr.pvec &&
+			    !mmu_interval_read_retry(&obj->userptr.notifier,
+						     obj->userptr.notifier_seq)) {
+				obj->userptr.page_ref++;
+
+				/* We can keep using the current binding, this is the fastpath */
+				ret = 1;
+			}
+			mutex_unlock(&i915->mm.notifier_lock);
+		}
+	}
+
+	if (!ret) {
+		/* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
+		ret = i915_gem_object_userptr_unbind(obj, false);
+	}
 	i915_gem_object_unlock(obj);
-	if (ret)
+	if (ret < 0)
 		return ret;
 
+	if (ret > 0)
+		return 0;
+
 	notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
 
 	pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
-- 
2.28.0
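
A minimal sketch, not part of the patch, of the mmu_interval_notifier
seqcount pattern the fastpath above relies on: sample the sequence with
mmu_interval_read_begin() when pinning, use mmu_interval_check_retry()
as a cheap unlocked hint, then confirm with mmu_interval_read_retry()
under the same lock the invalidate callback takes. The demo_* names,
struct, and lock below are hypothetical placeholders, not i915 code.

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct demo_userptr {
	struct mmu_interval_notifier notifier;
	unsigned long notifier_seq;	/* sequence sampled at last page pin */
	struct mutex lock;		/* also taken by the invalidate callback */
	bool pages_valid;		/* set once pages have been pinned */
};

/* When (re)pinning pages: sample the sequence before pinning. */
static void demo_begin_pin(struct demo_userptr *p)
{
	p->notifier_seq = mmu_interval_read_begin(&p->notifier);
}

/* Fastpath: reuse the existing pages if no invalidation ran since the pin. */
static bool demo_try_reuse(struct demo_userptr *p)
{
	bool reused = false;

	/* Unlocked check first; it may race, so it is only a hint to skip work. */
	if (!p->pages_valid ||
	    mmu_interval_check_retry(&p->notifier, p->notifier_seq))
		return false;

	/* Confirm under the lock that serializes against invalidation. */
	mutex_lock(&p->lock);
	if (!mmu_interval_read_retry(&p->notifier, p->notifier_seq))
		reused = true;	/* sequence unchanged: pages are still good */
	mutex_unlock(&p->lock);

	return reused;
}

In the patch, i915->mm.notifier_lock plays the role of this lock, the
extra page_ref pin keeps the preserved pages alive, and ret = 1 makes
submit_init return 0 early without repinning.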