From: John Harrison <John.C.Harrison@xxxxxxxxx>

The scheduler can cause batch buffers, and hence requests, to be
submitted to the ring out of order and asynchronously to their
submission to the driver. Thus at the point of waiting for the
completion of a given request, it is not even guaranteed that the
request has actually been sent to the hardware yet. Even if it has
been sent, it is possible that it could be pre-empted and thus
'unsent'.

This means that it is necessary to be able to submit requests to the
hardware during the wait call itself. Unfortunately, while some
callers of __wait_request() release the mutex lock first, others do
not (and apparently can not). Hence there is the potential for
deadlock: the wait stalls waiting for submission, while the
asynchronous submission stalls waiting for the mutex lock.

This change hooks the scheduler into the __wait_request() code to
ensure correct behaviour. That is, flush the target batch buffer
through to the hardware and do not deadlock waiting for something
that cannot currently be submitted. Instead, the wait call must
return -EAGAIN at least as far back up the call chain as is necessary
to release the mutex lock and allow the scheduler's asynchronous
processing to get in, handle the pre-emption operation and eventually
(re-)submit the work.

Change-Id: I31fe6bc7e38f6ffdd843fcae16e7cc8b1e52a931
For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@xxxxxxxxx>
---
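Not part of the patch itself: below is a minimal caller-side sketch of
the -EAGAIN contract described above, assuming the usual i915 headers.
The helper name example_wait_with_backoff() is hypothetical; only
i915_wait_request() and struct_mutex come from the driver. It shows why
the error must propagate far enough back to release the mutex before
the wait is retried.

static int example_wait_with_backoff(struct drm_device *dev,
                                     struct drm_i915_gem_request *req)
{
        int ret;

        for (;;) {
                mutex_lock(&dev->struct_mutex);
                /* May flush the request to the hardware, or back out
                 * with -EAGAIN if the scheduler still needs the mutex
                 * in order to make progress on this request. */
                ret = i915_wait_request(req);
                mutex_unlock(&dev->struct_mutex);

                if (ret != -EAGAIN)
                        return ret;

                /* The mutex is now released, so the scheduler's
                 * asynchronous processing can take it, handle any
                 * pre-emption and (re-)submit the work before the
                 * next attempt. */
                cond_resched();
        }
}

In short: callers that hold struct_mutex pass is_locked = true and must
be prepared to back off and retry as above; unlocked callers pass false
and block as before.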
 drivers/gpu/drm/i915/i915_drv.h         |   3 +-
 drivers/gpu/drm/i915/i915_gem.c         |  37 +++++++++--
 drivers/gpu/drm/i915/i915_scheduler.c   | 111 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.h   |   4 ++
 drivers/gpu/drm/i915/intel_display.c    |   5 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c |   2 +-
 6 files changed, 151 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9a67f7c..5ed600c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3029,7 +3029,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
-			struct intel_rps_client *rps);
+			struct intel_rps_client *rps,
+			bool is_locked);
 int __must_check i915_wait_request(struct drm_i915_gem_request *req);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 451ae6d..3994b47 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1207,7 +1207,8 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
-			struct intel_rps_client *rps)
+			struct intel_rps_client *rps,
+			bool is_locked)
 {
 	struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
 	struct drm_device *dev = ring->dev;
@@ -1217,8 +1218,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 	DEFINE_WAIT(wait);
 	unsigned long timeout_expire;
 	s64 before, now;
-	int ret;
+	int ret = 0;
+	bool busy;
 
+	might_sleep();
 	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
 
 	if (i915_gem_request_completed(req))
@@ -1269,6 +1272,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			break;
 		}
 
+		if (is_locked) {
+			/* If this request is being processed by the scheduler
+			 * then it is unsafe to sleep with the mutex lock held
+			 * as the scheduler may require the lock in order to
+			 * progress the request.
+			 */
+			if (i915_scheduler_is_request_tracked(req, NULL, &busy)) {
+				if (busy) {
+					ret = -EAGAIN;
+					break;
+				}
+			}
+
+			/* If the request is not tracked by the scheduler then the
+			 * regular test can be done. */
+		}
+
 		if (i915_gem_request_completed(req)) {
 			ret = 0;
 			break;
@@ -1449,13 +1468,17 @@ i915_wait_request(struct drm_i915_gem_request *req)
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 
+	ret = i915_scheduler_flush_request(req, true);
+	if (ret < 0)
+		return ret;
+
 	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 	if (ret)
 		return ret;
 
 	ret = __i915_wait_request(req,
 				  atomic_read(&dev_priv->gpu_error.reset_counter),
-				  interruptible, NULL, NULL);
+				  interruptible, NULL, NULL, true);
 	if (ret)
 		return ret;
 
@@ -1568,7 +1591,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	mutex_unlock(&dev->struct_mutex);
 	for (i = 0; ret == 0 && i < n; i++)
 		ret = __i915_wait_request(requests[i], reset_counter, true,
-					  NULL, rps);
+					  NULL, rps, false);
 	mutex_lock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++) {
@@ -3493,7 +3516,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		if (ret == 0)
 			ret = __i915_wait_request(req[i], reset_counter, true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
-						  file->driver_priv);
+						  file->driver_priv, false);
 		i915_gem_request_unreference(req[i]);
 	}
 	return ret;
@@ -3526,7 +3549,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 				       atomic_read(&i915->gpu_error.reset_counter),
 				       i915->mm.interruptible,
 				       NULL,
-				       &i915->rps.semaphores);
+				       &i915->rps.semaphores, true);
 		if (ret)
 			return ret;
 
@@ -4485,7 +4508,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
+	ret = __i915_wait_request(target, reset_counter, true, NULL, NULL, false);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 5dc3497..b8b84a6 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -31,6 +31,8 @@ static int i915_scheduler_remove_dependent(struct i915_scheduler *schedu
 					   struct i915_scheduler_queue_entry *remove);
 static int i915_scheduler_submit(struct intel_engine_cs *ring,
 				 bool is_locked);
+static int i915_scheduler_submit_max_priority(struct intel_engine_cs *ring,
+					      bool is_locked);
 static uint32_t i915_scheduler_count_flying(struct i915_scheduler *scheduler,
 					    struct intel_engine_cs *ring);
 static void i915_scheduler_priority_bump_clear(struct i915_scheduler *scheduler);
@@ -554,6 +556,57 @@ void i915_gem_scheduler_work_handler(struct work_struct *work)
 	}
 }
 
+int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
+				 bool is_locked)
+{
+	struct drm_i915_private *dev_priv;
+	struct i915_scheduler *scheduler;
+	unsigned long flags;
+	int flush_count;
+	uint32_t ring_id;
+
+	if (!req)
+		return -EINVAL;
+
+	dev_priv = req->ring->dev->dev_private;
+	scheduler = dev_priv->scheduler;
+
+	if (!scheduler)
+		return 0;
+
+	if (!req->scheduler_qe)
+		return 0;
+
+	if (!I915_SQS_IS_QUEUED(req->scheduler_qe))
+		return 0;
+
+	ring_id = req->ring->id;
+	if (is_locked && (scheduler->flags[ring_id] & i915_sf_submitting)) {
+		/* Scheduler is busy already submitting another batch,
+		 * come back later rather than going recursive...
+		 */
+		return -EAGAIN;
+	}
+
+	if (list_empty(&scheduler->node_queue[ring_id]))
+		return 0;
+
+	spin_lock_irqsave(&scheduler->lock, flags);
+
+	i915_scheduler_priority_bump_clear(scheduler);
+
+	flush_count = i915_scheduler_priority_bump(scheduler,
+			req->scheduler_qe, scheduler->priority_level_max);
+
+	spin_unlock_irqrestore(&scheduler->lock, flags);
+
+	if (flush_count) {
+		DRM_DEBUG_DRIVER("<%s> Bumped %d entries\n",
+				 req->ring->name, flush_count);
+
+		flush_count = i915_scheduler_submit_max_priority(req->ring,
+								 is_locked);
+	}
+
+	return flush_count;
+}
+
 static void i915_scheduler_priority_bump_clear(struct i915_scheduler *scheduler)
 {
 	struct i915_scheduler_queue_entry *node;
@@ -607,6 +660,44 @@ static int i915_scheduler_priority_bump(struct i915_scheduler *scheduler,
 	return count;
 }
 
+static int i915_scheduler_submit_max_priority(struct intel_engine_cs *ring,
+					      bool is_locked)
+{
+	struct i915_scheduler_queue_entry *node;
+	struct drm_i915_private *dev_priv = ring->dev->dev_private;
+	struct i915_scheduler *scheduler = dev_priv->scheduler;
+	unsigned long flags;
+	int ret, count = 0;
+	bool found;
+
+	do {
+		found = false;
+		spin_lock_irqsave(&scheduler->lock, flags);
+		list_for_each_entry(node, &scheduler->node_queue[ring->id], link) {
+			if (!I915_SQS_IS_QUEUED(node))
+				continue;
+
+			if (node->priority < scheduler->priority_level_max)
+				continue;
+
+			found = true;
+			break;
+		}
+		spin_unlock_irqrestore(&scheduler->lock, flags);
+
+		if (!found)
+			break;
+
+		ret = i915_scheduler_submit(ring, is_locked);
+		if (ret < 0)
+			return ret;
+
+		count += ret;
+	} while (found);
+
+	return count;
+}
+
 static int i915_scheduler_pop_from_queue_locked(struct intel_engine_cs *ring,
 						struct i915_scheduler_queue_entry **pop_node,
 						unsigned long *flags)
@@ -821,6 +912,26 @@ static int i915_scheduler_remove_dependent(struct i915_scheduler *scheduler,
 	return 0;
 }
 
+bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
+				       bool *completed, bool *busy)
+{
+	struct drm_i915_private *dev_priv = req->ring->dev->dev_private;
+	struct i915_scheduler *scheduler = dev_priv->scheduler;
+
+	if (!scheduler)
+		return false;
+
+	if (req->scheduler_qe == NULL)
+		return false;
+
+	if (completed)
+		*completed = I915_SQS_IS_COMPLETE(req->scheduler_qe);
+	if (busy)
+		*busy = I915_SQS_IS_QUEUED(req->scheduler_qe);
+
+	return true;
+}
+
 int i915_scheduler_closefile(struct drm_device *dev, struct drm_file *file)
 {
 	struct i915_scheduler_queue_entry *node;
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 8469270..42371b3 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -92,5 +92,9 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *q
 bool i915_scheduler_notify_request(struct drm_i915_gem_request *req);
 void i915_scheduler_wakeup(struct drm_device *dev);
 void i915_gem_scheduler_work_handler(struct work_struct *work);
+int i915_scheduler_flush_request(struct drm_i915_gem_request *req,
+				 bool is_locked);
+bool i915_scheduler_is_request_tracked(struct drm_i915_gem_request *req,
+				       bool *completed, bool *busy);
 
 #endif /* _I915_SCHEDULER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c00bc50..58c464f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11258,7 +11258,8 @@ static void intel_mmio_flip_work_func(struct work_struct *work)
 		WARN_ON(__i915_wait_request(mmio_flip->req,
 					    mmio_flip->crtc->reset_counter,
 					    false, NULL,
-					    &mmio_flip->i915->rps.mmioflips));
+					    &mmio_flip->i915->rps.mmioflips,
+					    false));
 
 	i915_gem_request_unreference(mmio_flip->req);
 }
@@ -13258,7 +13259,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 			ret = __i915_wait_request(intel_plane_state->wait_req,
 						  reset_counter, true,
-						  NULL, NULL);
+						  NULL, NULL, false);
 
 			/* Swallow -EIO errors to allow updates during hw lockup. */
 			if (ret == -EIO)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 34fbe68..4d77886 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2294,7 +2294,7 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 	return __i915_wait_request(req,
 				   atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter),
 				   to_i915(ring->dev)->mm.interruptible,
-				   NULL, NULL);
+				   NULL, NULL, true);
 }
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx