On completion of a banned context, scrub the context image so that we do
not replay the active payload. The intent is that we skip banned
payloads on request submission so that timeline advancement continues in
the background. However, if we are returning to a preempted request,
i915_request_skip() is ineffective, and instead we need to patch up the
context image so that it continues from the start of the next request.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 78 +++++++++++++++++++++++++++--
 1 file changed, 73 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 0518828129aa..0ea2f4d7dc43 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -234,6 +234,9 @@ static void execlists_init_reg_state(u32 *reg_state,
 					     const struct intel_engine_cs *engine,
 					     const struct intel_ring *ring,
 					     bool close);
+static void
+__execlists_update_reg_state(const struct intel_context *ce,
+			     const struct intel_engine_cs *engine);
 
 static void mark_eio(struct i915_request *rq)
 {
@@ -1012,6 +1015,69 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 		tasklet_schedule(&ve->base.execlists.tasklet);
 }
 
+static void
+mark_complete(struct i915_request *rq, struct intel_engine_cs *engine)
+{
+	const struct intel_timeline * const tl = rcu_dereference(rq->timeline);
+
+	*(u32 *)tl->hwsp_seqno = rq->fence.seqno;
+	GEM_BUG_ON(!i915_request_completed(rq));
+
+	list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+		if (i915_request_signaled(rq))
+			break;
+
+		mark_eio(rq);
+	}
+
+	intel_engine_queue_breadcrumbs(engine);
+}
+
+static void __context_pin_acquire(struct intel_context *ce)
+{
+	GEM_BUG_ON(!intel_context_is_pinned(ce));
+	mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _RET_IP_);
+}
+
+static void __context_pin_release(struct intel_context *ce)
+{
+	mutex_release(&ce->pin_mutex.dep_map, 0, _RET_IP_);
+}
+
+static void cancel_active(struct i915_request *rq,
+			  struct intel_engine_cs *engine)
+{
+	struct intel_context * const ce = rq->hw_context;
+	u32 *regs = ce->lrc_reg_state;
+
+	if (i915_request_completed(rq))
+		return;
+
+	GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
+		  __func__, engine->name, rq->fence.context, rq->fence.seqno);
+	__context_pin_acquire(ce);
+
+	/* Scrub the context image to prevent replaying the previous batch */
+	memcpy(regs, /* skip restoring the vanilla PPHWSP */
+	       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+	       engine->context_size - PAGE_SIZE);
+	execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+
+	/* Ring will be advanced on retire; here we need to reset the context */
+	ce->ring->head = intel_ring_wrap(ce->ring, rq->wa_tail);
+	__execlists_update_reg_state(ce, engine);
+
+	/* We've switched away, so this should be a no-op, but intent matters */
+	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+
+	/* Let everyone know that the request may now be retired */
+	rcu_read_lock();
+	mark_complete(rq, engine);
+	rcu_read_unlock();
+
+	__context_pin_release(ce);
+}
+
 static inline void
 __execlists_schedule_out(struct i915_request *rq,
 			 struct intel_engine_cs * const engine)
@@ -1022,6 +1088,9 @@ __execlists_schedule_out(struct i915_request *rq,
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 	intel_gt_pm_put(engine->gt);
 
+	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+		cancel_active(rq, engine);
+
 	/*
 	 * If this is part of a virtual engine, its next request may
 	 * have been blocked waiting for access to the active context.
@@ -2833,7 +2902,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
 	/* Proclaim we have exclusive access to the context image! */
-	mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_);
+	__context_pin_acquire(ce);
 
 	rq = active_request(rq);
 	if (!rq) {
@@ -2897,7 +2966,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	__execlists_reset_reg_state(ce, engine);
 	__execlists_update_reg_state(ce, engine);
 	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
-	mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
+	__context_pin_release(ce);
 
 unwind:
 	/* Push back any incomplete requests for replay after the reset. */
@@ -4522,8 +4591,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
 			    u32 head,
 			    bool scrub)
 {
-	GEM_BUG_ON(!intel_context_is_pinned(ce));
-	mutex_acquire(&ce->pin_mutex.dep_map, 2, 0, _THIS_IP_);
+	__context_pin_acquire(ce);
 
 	/*
 	 * We want a simple context + ring to execute the breadcrumb update.
@@ -4549,7 +4617,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
 	intel_ring_update_space(ce->ring);
 
 	__execlists_update_reg_state(ce, engine);
-	mutex_release(&ce->pin_mutex.dep_map, 0, _THIS_IP_);
+	__context_pin_release(ce);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-- 
2.23.0
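For readers less familiar with the execlists flow, the sequence that
cancel_active() performs can be sketched outside the kernel. The listing
below is a minimal, standalone userspace mock, not i915 code: every name
in it (mock_context, mock_ring, mock_cancel_active, and so on) is
hypothetical and only mirrors the order of operations described above:
scrub the register state back to a default image, rewind the ring head
to the start of the next request, flag a forced restore, and report the
request as complete.

/*
 * Standalone userspace mock -- not i915 code. All names here are
 * hypothetical; the mock only illustrates the order of operations
 * performed by cancel_active() in the patch above.
 */
#include <stdio.h>
#include <string.h>

#define CTX_REGS  8	/* pretend register-state size, in dwords */
#define RING_SIZE 16	/* pretend ring size, power of two */

struct mock_ring {
	unsigned int size;
	unsigned int head;
	unsigned int tail;
};

struct mock_context {
	unsigned int regs[CTX_REGS];	/* stands in for ce->lrc_reg_state */
	struct mock_ring ring;
	int force_restore;		/* stands in for CTX_DESC_FORCE_RESTORE */
};

struct mock_request {
	unsigned int seqno;
	unsigned int wa_tail;		/* ring offset just past this request */
	unsigned int *hwsp_seqno;	/* where completion is reported */
};

static const unsigned int default_state[CTX_REGS];	/* all-zero "default image" */

static unsigned int mock_ring_wrap(const struct mock_ring *ring, unsigned int offset)
{
	return offset & (ring->size - 1);
}

static void mock_cancel_active(struct mock_request *rq, struct mock_context *ce)
{
	/* Scrub the context image so the banned payload is not replayed. */
	memcpy(ce->regs, default_state, sizeof(ce->regs));

	/* Restart execution from the start of the next request. */
	ce->ring.head = mock_ring_wrap(&ce->ring, rq->wa_tail);

	/* The next submission must reload the scrubbed image. */
	ce->force_restore = 1;

	/* Report the request as complete so it can be retired. */
	*rq->hwsp_seqno = rq->seqno;
}

int main(void)
{
	unsigned int hwsp = 0;
	struct mock_context ce = {
		.regs = { 0xdeadbeef },
		.ring = { .size = RING_SIZE, .head = 2, .tail = 6 },
	};
	struct mock_request rq = { .seqno = 3, .wa_tail = 6, .hwsp_seqno = &hwsp };

	mock_cancel_active(&rq, &ce);
	printf("head=%u force_restore=%d hwsp=%u\n",
	       ce.ring.head, ce.force_restore, hwsp);
	return 0;
}

Built with a plain C compiler, the mock prints head=6 force_restore=1
hwsp=3: the banned payload is skipped, the ring restarts at the next
request, and the seqno write lets the request be retired.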