Re: [PATCH v5 14/35] drm/i915: Keep the reserved space mechanism happy

On 02/18/2016 06:27 AM, John.C.Harrison@xxxxxxxxx wrote:
> From: John Harrison <John.C.Harrison@xxxxxxxxx>
> 
> Ring space is reserved when constructing a request to ensure that the
> subsequent 'add_request()' call cannot fail due to waiting for space
> on a busy or broken GPU. However, the scheduler jumps into the middle
> of the execbuffer process between request creation and request
> submission. Thus it needs to cancel the reserved space when the
> request is simply added to the scheduler's queue and not yet
> submitted. Similarly, it needs to re-reserve the space when it finally
> does want to send the batch buffer to the hardware.
> 
> v3: Updated to use locally cached request pointer.
> 
> v5: Updated due to changes to earlier patches in series - for runtime
> PM calls and splitting bypass mode into a separate function.
> 
> For: VIZ-1587
> Signed-off-by: John Harrison <John.C.Harrison@xxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 20 ++++++++++++++------
>  drivers/gpu/drm/i915/i915_scheduler.c      |  4 ++++
>  drivers/gpu/drm/i915/intel_lrc.c           | 13 +++++++++++--
>  3 files changed, 29 insertions(+), 8 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 09c5ce9..11bea8d 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1295,18 +1295,22 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
>  	/* The mutex must be acquired before calling this function */
>  	WARN_ON(!mutex_is_locked(&params->dev->struct_mutex));
>  
> +	ret = intel_ring_reserve_space(req);
> +	if (ret)
> +		goto error;
> +
>  	/*
>  	 * Unconditionally invalidate gpu caches and ensure that we do flush
>  	 * any residual writes from the previous batch.
>  	 */
>  	ret = intel_ring_invalidate_all_caches(req);
>  	if (ret)
> -		return ret;
> +		goto error;
>  
>  	/* Switch to the correct context for the batch */
>  	ret = i915_switch_context(req);
>  	if (ret)
> -		return ret;
> +		goto error;
>  
>  	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
>  	     "%s didn't clear reload\n", ring->name);
> @@ -1315,7 +1319,7 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
>  	    params->instp_mode != dev_priv->relative_constants_mode) {
>  		ret = intel_ring_begin(req, 4);
>  		if (ret)
> -			return ret;
> +			goto error;
>  
>  		intel_ring_emit(ring, MI_NOOP);
>  		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> @@ -1329,7 +1333,7 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
>  	if (params->args_flags & I915_EXEC_GEN7_SOL_RESET) {
>  		ret = i915_reset_gen7_sol_offsets(params->dev, req);
>  		if (ret)
> -			return ret;
> +			goto error;
>  	}
>  
>  	exec_len   = params->args_batch_len;
> @@ -1343,13 +1347,17 @@ int i915_gem_ringbuffer_submission_final(struct i915_execbuffer_params *params)
>  					exec_start, exec_len,
>  					params->dispatch_flags);
>  	if (ret)
> -		return ret;
> +		goto error;
>  
>  	trace_i915_gem_ring_dispatch(req, params->dispatch_flags);
>  
>  	i915_gem_execbuffer_retire_commands(params);
>  
> -	return 0;
> +error:
> +	if (ret)
> +		intel_ring_reserved_space_cancel(req->ringbuf);
> +
> +	return ret;
>  }
>  
>  /**
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 3986890..a3ffd04 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -483,6 +483,8 @@ static int i915_scheduler_queue_execbuffer_bypass(struct i915_scheduler_queue_en
>  	struct i915_scheduler *scheduler = dev_priv->scheduler;
>  	int ret;
>  
> +	intel_ring_reserved_space_cancel(qe->params.request->ringbuf);
> +
>  	scheduler->flags[qe->params.ring->id] |= i915_sf_submitting;
>  	ret = dev_priv->gt.execbuf_final(&qe->params);
>  	scheduler->flags[qe->params.ring->id] &= ~i915_sf_submitting;
> @@ -539,6 +541,8 @@ int i915_scheduler_queue_execbuffer(struct i915_scheduler_queue_entry *qe)
>  	node->stamp  = jiffies;
>  	i915_gem_request_reference(node->params.request);
>  
> +	intel_ring_reserved_space_cancel(node->params.request->ringbuf);
> +
>  	WARN_ON(node->params.request->scheduler_qe);
>  	node->params.request->scheduler_qe = node;
>  
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index ff4565f..f4bab82 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -978,13 +978,17 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
>  	/* The mutex must be acquired before calling this function */
>  	WARN_ON(!mutex_is_locked(&params->dev->struct_mutex));
>  
> +	ret = intel_logical_ring_reserve_space(req);
> +	if (ret)
> +		goto err;
> +
>  	/*
>  	 * Unconditionally invalidate gpu caches and ensure that we do flush
>  	 * any residual writes from the previous batch.
>  	 */
>  	ret = logical_ring_invalidate_all_caches(req);
>  	if (ret)
> -		return ret;
> +		goto err;
>  
>  	if (ring == &dev_priv->ring[RCS] &&
>  	    params->instp_mode != dev_priv->relative_constants_mode) {
> @@ -1006,13 +1010,18 @@ int intel_execlists_submission_final(struct i915_execbuffer_params *params)
>  
>  	ret = ring->emit_bb_start(req, exec_start, params->dispatch_flags);
>  	if (ret)
> -		return ret;
> +		goto err;
>  
>  	trace_i915_gem_ring_dispatch(req, params->dispatch_flags);
>  
>  	i915_gem_execbuffer_retire_commands(params);
>  
>  	return 0;
> +
> +err:
> +	intel_ring_reserved_space_cancel(params->request->ringbuf);
> +
> +	return ret;
>  }
>  
>  void intel_execlists_retire_requests(struct intel_engine_cs *ring)
> 

I had to double-check that it is OK to cancel the reserved space when the reserve itself failed (it appears to be), so this looks fine to me.
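
For anyone else wanting to verify the same thing, the flow I convinced myself of looks roughly like the sketch below. The sketch_* names are hypothetical stand-ins for intel_ring_reserve_space() / intel_ring_reserved_space_cancel(), so this only illustrates the reserve/cancel/re-reserve pattern the patch implements, not the actual driver code:

/*
 * Rough sketch of the reserved-space dance, assuming cancel is safe
 * even when no reservation is currently held.
 */
struct sketch_request {
	int reserved_bytes;	/* space held back so add_request() cannot fail */
};

static int sketch_reserve_space(struct sketch_request *req)
{
	if (req->reserved_bytes)
		return 0;		/* already reserved at request creation */
	req->reserved_bytes = 160;	/* arbitrary size; real code may wait for ring space */
	return 0;
}

static void sketch_cancel_reserve(struct sketch_request *req)
{
	/* Give the space back; harmless even if nothing was reserved. */
	req->reserved_bytes = 0;
}

/* Scheduler queues the request instead of submitting: drop the reservation. */
static void sketch_queue(struct sketch_request *req)
{
	sketch_cancel_reserve(req);
}

/* Final submission: re-take the reservation, cancel it on any error path. */
static int sketch_submit_final(struct sketch_request *req)
{
	int ret = sketch_reserve_space(req);

	if (ret)
		goto err;
	/* ... flush caches, switch context, emit the batch start ... */
	return 0;
err:
	sketch_cancel_reserve(req);
	return ret;
}

Assuming cancel really is a no-op when the reserve never succeeded, falling through to the common error label in both submission_final paths is fine, which is what I wanted to convince myself of.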

Reviewed-by: Jesse Barnes <jbarnes@xxxxxxxxxxxxxxxx>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx



