Space for flushing the GPU cache prior to completing the request is
preallocated, and so emitting that final flush cannot fail.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  9 +---
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 18 ++++----
 drivers/gpu/drm/i915/i915_gem_request.c    |  7 ++-
 drivers/gpu/drm/i915/intel_lrc.c           | 47 +++----------
 drivers/gpu/drm/i915/intel_lrc.h           |  2 -
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 72 +++++++-----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  7 ---
 8 files changed, 39 insertions(+), 125 deletions(-)
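Note for reviewers (below the fold, ignored by git-am): every wrapper
removed by this patch reduces to a direct call of the engine vfunc, which
the call sites here assume keeps the signature of the old ->flush hook,
i.e. emit_flush(req, invalidate_domains, flush_domains). A minimal sketch
of the three call patterns the patch leaves behind; the helper names are
hypothetical, for illustration only:

	/* Invalidate caches and TLBs before executing a new batch;
	 * replaces intel_engine_invalidate_all_caches() and
	 * logical_ring_invalidate_all_caches().
	 */
	static int sketch_invalidate(struct drm_i915_gem_request *req)
	{
		return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
	}

	/* Flush residual writes before emitting the request breadcrumb;
	 * replaces {intel_engine,logical_ring}_flush_all_caches() in
	 * __i915_add_request().
	 */
	static int sketch_flush(struct drm_i915_gem_request *req)
	{
		return req->engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
	}

	/* Full flush + invalidate barrier, as used around workaround
	 * emission and ppGTT page-directory loads.
	 */
	static int sketch_barrier(struct drm_i915_gem_request *req)
	{
		return req->engine->emit_flush(req,
					       I915_GEM_GPU_DOMAINS,
					       I915_GEM_GPU_DOMAINS);
	}

Since __i915_add_request() now flushes unconditionally for every request,
the engine->gpu_caches_dirty bookkeeping has no remaining users and is
removed along with the wrappers.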
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 13b934ab4a8a..9eb6ab9cb610 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -529,7 +529,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(dev_priv)) {
-		ret = req->engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 186e466f932f..6e439f5d1674 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -968,10 +968,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
 
-	/* Unconditionally invalidate gpu caches and ensure that we do flush
-	 * any residual writes from the previous batch.
-	 */
-	return intel_engine_invalidate_all_caches(req);
+	/* Unconditionally invalidate gpu caches and TLBs. */
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 static bool
@@ -1130,9 +1128,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
-	/* Unconditionally force add_request to emit a full flush. */
-	params->engine->gpu_caches_dirty = true;
-
 	/* Add a breadcrumb for the completion of the batch buffer */
 	__i915_add_request(params->request, params->batch_obj, true);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6a6e69a3894f..5d718c488f23 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1664,9 +1664,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = req->engine->flush(req,
-				 I915_GEM_GPU_DOMAINS,
-				 I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1703,9 +1703,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = req->engine->flush(req,
-				 I915_GEM_GPU_DOMAINS,
-				 I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1723,9 +1723,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (req->engine->id != RCS) {
-		ret = req->engine->flush(req,
-					 I915_GEM_GPU_DOMAINS,
-					 I915_GEM_GPU_DOMAINS);
+		ret = req->engine->emit_flush(req,
+					      I915_GEM_GPU_DOMAINS,
+					      I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 58d84b153810..b0c6e57197bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -443,10 +443,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * what.
 	 */
 	if (flush_caches) {
-		if (i915.enable_execlists)
-			ret = logical_ring_flush_all_caches(request);
-		else
-			ret = intel_engine_flush_all_caches(request);
+		ret = request->engine->emit_flush(request,
+						  0, I915_GEM_GPU_DOMAINS);
+
 		/* Not allowed to fail! */
 		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e8685ce4d2a4..55d529e5614d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -620,24 +620,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	spin_unlock_bh(&engine->execlist_lock);
 }
 
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
@@ -668,7 +650,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(req);
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -911,22 +893,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
@@ -1007,15 +973,15 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1032,8 +998,9 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index baf90543857a..87db0b6c2e76 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -64,8 +64,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine);
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
 int intel_logical_rings_init(struct drm_device *dev);
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-
 /* Logical Ring Contexts */
 
 /* One extra page is added before LRC for GuC as shared data */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 07c2470c24f9..41e1bd9dc61d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -687,8 +687,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -705,8 +706,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -2627,7 +2629,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		engine->init_context = intel_rcs_ctx_init;
 		engine->add_request = gen8_render_add_request;
-		engine->flush = gen8_render_ring_flush;
+		engine->emit_flush = gen8_render_ring_flush;
 		engine->irq_enable = gen8_ring_enable_irq;
 		engine->irq_disable = gen8_ring_disable_irq;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
@@ -2640,9 +2642,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		engine->init_context = intel_rcs_ctx_init;
 		engine->add_request = gen6_add_request;
-		engine->flush = gen7_render_ring_flush;
+		engine->emit_flush = gen7_render_ring_flush;
 		if (IS_GEN6(dev_priv))
-			engine->flush = gen6_render_ring_flush;
+			engine->emit_flush = gen6_render_ring_flush;
 		engine->irq_enable = gen6_ring_enable_irq;
 		engine->irq_disable = gen6_ring_disable_irq;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
@@ -2670,7 +2672,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		}
 	} else if (IS_GEN5(dev_priv)) {
 		engine->add_request = i9xx_add_request;
-		engine->flush = gen4_render_ring_flush;
+		engine->emit_flush = gen4_render_ring_flush;
 		engine->irq_enable = gen5_ring_enable_irq;
 		engine->irq_disable = gen5_ring_disable_irq;
 		engine->irq_seqno_barrier = gen5_seqno_barrier;
@@ -2678,9 +2680,9 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	} else {
 		engine->add_request = i9xx_add_request;
 		if (INTEL_GEN(dev_priv) < 4)
-			engine->flush = gen2_render_ring_flush;
+			engine->emit_flush = gen2_render_ring_flush;
 		else
-			engine->flush = gen4_render_ring_flush;
+			engine->emit_flush = gen4_render_ring_flush;
 		if (IS_GEN2(dev_priv)) {
 			engine->irq_enable = i8xx_ring_enable_irq;
 			engine->irq_disable = i8xx_ring_disable_irq;
@@ -2740,7 +2742,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev_priv))
 			engine->write_tail = gen6_bsd_ring_write_tail;
-		engine->flush = gen6_bsd_ring_flush;
+		engine->emit_flush = gen6_bsd_ring_flush;
 		engine->add_request = gen6_add_request;
 		engine->irq_seqno_barrier = gen6_seqno_barrier;
 		if (INTEL_GEN(dev_priv) >= 8) {
@@ -2778,7 +2780,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		}
 	} else {
 		engine->mmio_base = BSD_RING_BASE;
-		engine->flush = bsd_ring_flush;
+		engine->emit_flush = bsd_ring_flush;
 		engine->add_request = i9xx_add_request;
 		if (IS_GEN5(dev_priv)) {
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
@@ -2812,7 +2814,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 
 	engine->write_tail = ring_write_tail;
 	engine->mmio_base = GEN8_BSD2_RING_BASE;
-	engine->flush = gen6_bsd_ring_flush;
+	engine->emit_flush = gen6_bsd_ring_flush;
 	engine->add_request = gen6_add_request;
 	engine->irq_seqno_barrier = gen6_seqno_barrier;
 	engine->irq_enable_mask =
@@ -2843,7 +2845,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
 	engine->mmio_base = BLT_RING_BASE;
 	engine->write_tail = ring_write_tail;
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 	engine->add_request = gen6_add_request;
 	engine->irq_seqno_barrier = gen6_seqno_barrier;
 	if (INTEL_GEN(dev_priv) >= 8) {
@@ -2901,7 +2903,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 
 	engine->mmio_base = VEBOX_RING_BASE;
 	engine->write_tail = ring_write_tail;
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 	engine->add_request = gen6_add_request;
 	engine->irq_seqno_barrier = gen6_seqno_barrier;
 
@@ -2941,46 +2943,6 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	return intel_init_engine(dev, engine);
 }
 
-int
-intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
-int
-intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 void intel_engine_stop(struct intel_engine_cs *engine)
 {
 	int ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5403cc614095..4817a7fa2154 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -214,9 +214,6 @@ struct intel_engine_cs {
 	void		(*write_tail)(struct intel_engine_cs *ring,
 				      u32 value);
-	int __must_check (*flush)(struct drm_i915_gem_request *req,
-				  u32 invalidate_domains,
-				  u32 flush_domains);
 	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
@@ -334,8 +331,6 @@ struct intel_engine_cs {
 	u32 last_submitted_seqno;
 	unsigned user_interrupts;
 
-	bool gpu_caches_dirty;
-
 	struct i915_gem_context *last_context;
 
 	struct intel_engine_hangcheck hangcheck;
@@ -479,8 +474,6 @@ void intel_ring_update_space(struct intel_ring *ring);
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
 
-int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx