This simply hides the EAGAIN caused by userptr when userspace causes
resource contention: userptr page lookups are moved off the system
workqueue and onto a dedicated, high-priority workqueue which execbuf
flushes when required, and which is otherwise left pending for
userspace to retry on EAGAIN.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_dma.c            |  1 +
 drivers/gpu/drm/i915/i915_drv.h            |  8 ++++++++
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  3 +++
 drivers/gpu/drm/i915/i915_gem_userptr.c    | 16 +++++++++++++---
 4 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7d85c3bea02a..c1afbd873197 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1192,6 +1192,7 @@ int i915_driver_unload(struct drm_device *dev)
 	mutex_unlock(&dev->struct_mutex);
 	intel_fbc_cleanup_cfb(dev_priv);
 	i915_gem_cleanup_stolen(dev);
+	i915_gem_cleanup_userptr(dev);
 
 	intel_csr_ucode_fini(dev_priv);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 601ef7412cf9..a4311e2d2140 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1295,6 +1295,13 @@ struct i915_gem_mm {
 	struct delayed_work idle_work;
 
 	/**
+	 * Workqueue to fault in userptr pages, flushed by the execbuf
+	 * when required but otherwise left to userspace to try again
+	 * on EAGAIN.
+	 */
+	struct workqueue_struct *userptr_wq;
+
+	/**
 	 * Are we in a non-interruptible section of code like
 	 * modesetting?
 	 */
@@ -2724,6 +2731,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
 int i915_gem_get_tiling(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_init_userptr(struct drm_device *dev);
+void i915_gem_cleanup_userptr(struct drm_device *dev);
 int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			   struct drm_file *file);
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f40d3254249a..733250afa139 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1133,6 +1133,9 @@ repeat:
 		}
 	}
 
+	/* A frequent cause of EAGAIN is currently unavailable client pages */
+	flush_workqueue(eb->i915->mm.userptr_wq);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret) {
 		mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 232ce85b39db..54385f6c7e14 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -102,7 +102,8 @@ static unsigned long cancel_userptr(struct i915_mmu_object *mo)
 	 * is freed and then double free it.
 	 */
 	if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
-		schedule_work(&mo->work);
+		queue_work(to_i915(mo->obj->base.dev)->mm.userptr_wq,
+			   &mo->work);
 		/* only schedule one work packet to avoid the refleak */
 		mo->active = false;
 	}
@@ -450,7 +451,7 @@ __i915_mm_struct_free(struct kref *kref)
 	mutex_unlock(&to_i915(mm->dev)->mm_lock);
 
 	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
-	schedule_work(&mm->work);
+	queue_work(to_i915(mm->dev)->mm.userptr_wq, &mm->work);
 }
 
 static void
@@ -664,7 +665,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 	get_task_struct(work->task);
 
 	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
-	schedule_work(&work->work);
+	queue_work(to_i915(obj->base.dev)->mm.userptr_wq, &work->work);
 
 	*active = true;
 	return -EAGAIN;
@@ -886,5 +887,14 @@ i915_gem_init_userptr(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	mutex_init(&dev_priv->mm_lock);
 	hash_init(dev_priv->mm_structs);
+	dev_priv->mm.userptr_wq =
+		alloc_workqueue("i915-userptr", WQ_HIGHPRI, 0);
 	return 0;
 }
+
+void
+i915_gem_cleanup_userptr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	destroy_workqueue(dev_priv->mm.userptr_wq);
+}
-- 
2.7.0.rc3
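For reference, the workqueue pattern the patch relies on is small enough to
show in isolation. The sketch below is not part of the patch and every
demo_* identifier is invented for illustration; only alloc_workqueue(),
queue_work(), flush_workqueue() and destroy_workqueue() are the same kernel
calls used in the hunks above:

/*
 * Illustrative only -- not the i915 code.  A bare-bones module showing the
 * same pattern: a driver-private WQ_HIGHPRI workqueue, work queued
 * asynchronously, and a flush used as a synchronisation point (the role
 * flush_workqueue() plays in the execbuf hunk).
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/printk.h>
#include <linux/errno.h>

static struct workqueue_struct *demo_wq;

/* stand-in for __i915_gem_userptr_get_pages_worker() */
static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo-userptr: deferred page lookup would run here\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	/* like i915_gem_init_userptr(): a private high-priority queue */
	demo_wq = alloc_workqueue("demo-userptr", WQ_HIGHPRI, 0);
	if (!demo_wq)
		return -ENOMEM;

	/* like queue_work(...->mm.userptr_wq, ...) in the patch */
	queue_work(demo_wq, &demo_work);

	/* like the execbuf hunk: wait for pending lookups to complete */
	flush_workqueue(demo_wq);
	return 0;
}

static void __exit demo_exit(void)
{
	/* like i915_gem_cleanup_userptr() */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sketch of the dedicated-workqueue pattern");

A private queue means flush_workqueue() only waits for the driver's own
userptr work rather than for everything pending on the system workqueue,
while WQ_HIGHPRI has those page lookups serviced by the high-priority
worker pool.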