From: John Harrison <John.C.Harrison@xxxxxxxxx>

Ring space is reserved when constructing a request to ensure that the
subsequent 'add_request()' call cannot fail due to waiting for space
on a busy or broken GPU. However, the scheduler jumps into the middle
of the execbuffer process between request creation and request
submission. Thus it needs to cancel the reserved space when the
request is simply added to the scheduler's queue and not yet
submitted. Similarly, it needs to re-reserve the space when it finally
does want to send the batch buffer to the hardware.

For: VIZ-1587
Signed-off-by: John Harrison <John.C.Harrison@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  7 +++++++
 drivers/gpu/drm/i915/i915_scheduler.c      |  4 ++++
 drivers/gpu/drm/i915/intel_lrc.c           | 13 +++++++++++--
 3 files changed, 22 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index cd4aa64..12b34d7 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1248,6 +1248,10 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
 	/* The mutex must be acquired before calling this function */
 	BUG_ON(!mutex_is_locked(&params->dev->struct_mutex));
 
+	ret = intel_ring_reserve_space(params->request);
+	if (ret)
+		return ret;
+
 	intel_runtime_pm_get(dev_priv);
 
 	/*
@@ -1308,6 +1312,9 @@ error:
 	 */
 	intel_runtime_pm_put(dev_priv);
 
+	if (ret)
+		intel_ring_reserved_space_cancel(params->request->ringbuf);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 7521338..79d02d7 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -91,6 +91,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 	if (1/*i915.scheduler_override & i915_so_direct_submit*/) {
 		int ret;
 
+		intel_ring_reserved_space_cancel(qe->params.request->ringbuf);
+
 		scheduler->flags[qe->params.ring->id] |= i915_sf_submitting;
 		ret = dev_priv->gt.execbuf_final(&qe->params);
 		scheduler->flags[qe->params.ring->id] &= ~i915_sf_submitting;
@@ -120,6 +122,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
 	node->stamp = jiffies;
 	i915_gem_request_reference(node->params.request);
 
+	intel_ring_reserved_space_cancel(node->params.request->ringbuf);
+
 	BUG_ON(node->params.request->scheduler_qe);
 	node->params.request->scheduler_qe = node;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 2f49fb9..16c5302 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -932,13 +932,17 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
 	/* The mutex must be acquired before calling this function */
 	BUG_ON(!mutex_is_locked(&params->dev->struct_mutex));
 
+	ret = intel_logical_ring_reserve_space(params->request);
+	if (ret)
+		return ret;
+
 	/*
 	 * Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
	 */
 	ret = logical_ring_invalidate_all_caches(params->request);
 	if (ret)
-		return ret;
+		goto err;
 
 	if (ring == &dev_priv->ring[RCS] &&
 	    params->instp_mode != dev_priv->relative_constants_mode) {
@@ -962,13 +966,18 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
 
 	ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags);
 	if (ret)
-		return ret;
+		goto err;
 
 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
 	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
+
+err:
+	intel_ring_reserved_space_cancel(params->request->ringbuf);
+
+	return ret;
 }
 
 void intel_execlists_retire_requests(struct intel_engine_cs *ring)
-- 
1.9.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
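
[Editor's note] The lifecycle the patch implements is: reserve ring space when the
request is constructed, cancel the reservation when the scheduler merely queues the
work, re-reserve in the *_submission_final() backend, and cancel again if that backend
fails. The following is only a minimal, self-contained C model of that bookkeeping,
under the assumption stated in the commit message that add_request() must never have
to wait for ring space; struct ring_model and the ring_reserve()/ring_reserve_cancel()
helpers are hypothetical stand-ins for the driver's intel_ring_reserve_space() /
intel_ring_reserved_space_cancel(), not the i915 code itself.

/*
 * Illustrative sketch only: models the reserve / cancel / re-reserve
 * pattern described above, outside the kernel.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring_model {
	int space;	/* bytes currently free in the ring */
	int reserved;	/* bytes set aside so add_request() cannot fail */
};

/* Reserve space up front; fails only here, never at add_request() time. */
static bool ring_reserve(struct ring_model *ring, int bytes)
{
	if (ring->space < bytes)
		return false;	/* would otherwise wait on a busy/broken GPU */
	ring->space -= bytes;
	ring->reserved += bytes;
	return true;
}

/* Give the reservation back when the request is only queued, not submitted. */
static void ring_reserve_cancel(struct ring_model *ring)
{
	ring->space += ring->reserved;
	ring->reserved = 0;
}

int main(void)
{
	struct ring_model ring = { .space = 4096, .reserved = 0 };

	/* 1. Request construction: reserve space for the final commands. */
	if (!ring_reserve(&ring, 160))
		return 1;

	/* 2. Scheduler queues the work instead of submitting: cancel. */
	ring_reserve_cancel(&ring);
	printf("queued:     space=%d reserved=%d\n", ring.space, ring.reserved);

	/* 3. Backend finally submits the batch buffer: re-reserve first. */
	if (!ring_reserve(&ring, 160))
		return 1;
	printf("submitting: space=%d reserved=%d\n", ring.space, ring.reserved);

	/* 4. On a submission error the backend would cancel again (step 2). */
	return 0;
}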