From: John Harrison <John.C.Harrison@xxxxxxxxx>

Now that all callers of i915_add_request() have a request pointer to hand, it
is possible to update the add request function to take a request pointer
rather than pulling it out of the OLR.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@xxxxxxxxx>
Reviewed-by: Tomas Elf <tomas.elf@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h            | 10 +++++-----
 drivers/gpu/drm/i915/i915_gem.c            | 22 +++++++++++-----------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/intel_display.c       |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           |  2 +-
 drivers/gpu/drm/i915/intel_overlay.c       |  4 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    |  3 ++-
 7 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 650e5c7..f517fcc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2863,14 +2863,14 @@ void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *req,
 			struct drm_file *file,
 			struct drm_i915_gem_object *batch_obj,
 			bool flush_caches);
-#define i915_add_request(ring) \
-	__i915_add_request(ring, NULL, NULL, true)
-#define i915_add_request_no_flush(ring) \
-	__i915_add_request(ring, NULL, NULL, false)
+#define i915_add_request(req) \
+	__i915_add_request(req, NULL, NULL, true)
+#define i915_add_request_no_flush(req) \
+	__i915_add_request(req, NULL, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			unsigned reset_counter,
 			bool interruptible,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index db90043..12078bd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1158,7 +1158,7 @@ i915_gem_check_olr(struct drm_i915_gem_request *req)
 	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
 
 	if (req == req->ring->outstanding_lazy_request)
-		i915_add_request(req->ring);
+		i915_add_request(req);
 
 	return 0;
 }
@@ -2468,25 +2468,25 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  * request is not being tracked for completion but the work itself is
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *request,
 			struct drm_file *file,
 			struct drm_i915_gem_object *obj,
 			bool flush_caches)
 {
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct drm_i915_gem_request *request;
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
 	struct intel_ringbuffer *ringbuf;
 	u32 request_start;
 	int ret;
 
-	request = ring->outstanding_lazy_request;
 	if (WARN_ON(request == NULL))
 		return;
 
-	if (i915.enable_execlists) {
-		ringbuf = request->ctx->engine[ring->id].ringbuf;
-	} else
-		ringbuf = ring->buffer;
+	ring = request->ring;
+	dev_priv = ring->dev->dev_private;
+	ringbuf = request->ringbuf;
+
+	WARN_ON(request != ring->outstanding_lazy_request);
 
 	/*
 	 * To ensure that this call will not fail, space for its emissions
@@ -3344,7 +3344,7 @@ int i915_gpu_idle(struct drm_device *dev)
 				return ret;
 			}
 
-			i915_add_request_no_flush(req->ring);
+			i915_add_request_no_flush(req);
 		}
 
 		WARN_ON(ring->outstanding_lazy_request);
@@ -5128,7 +5128,7 @@ i915_gem_init_hw(struct drm_device *dev)
 			goto out;
 		}
 
-		i915_add_request_no_flush(ring);
+		i915_add_request_no_flush(req);
 	}
 
 out:
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index bea92ad..7533fb3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1058,7 +1058,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 	params->ring->gpu_caches_dirty = true;
 
 	/* Add a breadcrumb for the completion of the batch buffer */
-	__i915_add_request(params->ring, params->file, params->batch_obj, true);
+	__i915_add_request(params->request, params->file, params->batch_obj, true);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a1ac557..811ff0a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11264,7 +11264,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	}
 
 	if (request)
-		i915_add_request_no_flush(request->ring);
+		i915_add_request_no_flush(request);
 
 	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
 	work->enable_stall_check = true;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f8e8fdb..521ecfd 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1983,7 +1983,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 			goto error;
 		}
 
-		i915_add_request_no_flush(req->ring);
+		i915_add_request_no_flush(req);
 	}
 
 	ctx->rcs_initialized = true;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3adb63e..3f70904 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
-	i915_add_request(req->ring);
+	i915_add_request(req);
 
 	overlay->flip_tail = tail;
 	ret = i915_wait_request(overlay->last_flip_req);
@@ -299,7 +299,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
-	i915_add_request(req->ring);
+	i915_add_request(req);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ea91a33..978984e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2157,8 +2157,9 @@ int intel_ring_idle(struct intel_engine_cs *ring)
 	struct drm_i915_gem_request *req;
 
 	/* We need to add any requests required to flush the objects and ring */
+	WARN_ON(ring->outstanding_lazy_request);
 	if (ring->outstanding_lazy_request)
-		i915_add_request(ring);
+		i915_add_request(ring->outstanding_lazy_request);
 
 	/* Wait upon the last request to be completed */
 	if (list_empty(&ring->request_list))
-- 
1.7.9.5

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
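As an aside for anyone skimming the series, the calling-convention change can
be summarised with a small standalone sketch. The types below are simplified
stand-ins, not the real i915 definitions; the only point is that add_request
now receives the request the caller already holds and derives the engine from
it, instead of receiving the engine and pulling the request out of the OLR.

/* Simplified stand-in types, not the real i915 structures. */
#include <assert.h>
#include <stddef.h>

struct example_request;

struct example_engine {
	struct example_request *outstanding_lazy_request;	/* the OLR */
};

struct example_request {
	struct example_engine *ring;	/* engine the request was allocated on */
};

/* Old shape: caller passes the engine, the request is pulled out of the OLR. */
static void add_request_old(struct example_engine *ring)
{
	struct example_request *request = ring->outstanding_lazy_request;

	assert(request != NULL);
	/* ... emit flushes and the seqno breadcrumb for 'request' ... */
}

/*
 * New shape (this patch): caller passes the request it already holds and the
 * engine is derived from it; the OLR is only cross-checked, not relied upon.
 */
static void add_request_new(struct example_request *request)
{
	struct example_engine *ring = request->ring;

	assert(request == ring->outstanding_lazy_request);
	/* ... emit flushes and the seqno breadcrumb for 'request' ... */
}

int main(void)
{
	struct example_engine engine = { NULL };
	struct example_request request = { &engine };

	engine.outstanding_lazy_request = &request;
	add_request_old(&engine);	/* before this patch */
	add_request_new(&request);	/* after this patch */
	return 0;
}

The WARN_ON added to __i915_add_request() plays the role of the assert here:
while the OLR still exists, the explicit request and the engine's lazy request
are expected to be one and the same.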