On schedule-out (CS completion) of a banned context, scrub the context
image so that we do not replay the active payload. The intent is that we
skip banned payloads on request submission so that timeline advancement
continues in the background. However, if we are returning to a preempted
request, i915_request_skip() is ineffective and instead we need to patch
up the context image so that it continues from the start of the next
request.
v2: Fix up cancellation so that we only scrub the payload of the active
request and do not short-circuit the breadcrumbs (which might cause
other contexts to execute out of order).

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c    | 138 ++++++++----
drivers/gpu/drm/i915/gt/selftest_lrc.c | 273 +++++++++++++++++++++++++
 2 files changed, 370 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index e16ede75412b..b76b35194114 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -234,6 +234,9 @@ static void execlists_init_reg_state(u32 *reg_state,
 				     const struct intel_engine_cs *engine,
 				     const struct intel_ring *ring,
 				     bool close);
+static void
+__execlists_update_reg_state(const struct intel_context *ce,
+			     const struct intel_engine_cs *engine);
 
 static void cancel_timer(struct timer_list *t)
 {
@@ -270,6 +273,36 @@ static void mark_eio(struct i915_request *rq)
 	i915_request_mark_complete(rq);
 }
 
+static struct i915_request *active_request(struct i915_request *rq)
+{
+	const struct intel_context * const ce = rq->hw_context;
+	struct i915_request *active = NULL;
+	struct list_head *list;
+
+	if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
+		return rq;
+
+	rcu_read_lock();
+	list = &rcu_dereference(rq->timeline)->requests;
+	list_for_each_entry_from_reverse(rq, list, link) {
+		if (i915_request_completed(rq))
+			break;
+
+		if (rq->hw_context != ce)
+			break;
+
+		active = rq;
+	}
+	rcu_read_unlock();
+
+	return active;
+}
+
 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
 {
 	return (i915_ggtt_offset(engine->status_page.vma) +
@@ -991,6 +1024,60 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 		tasklet_schedule(&ve->base.execlists.tasklet);
 }
 
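+/*
+ * Scrub the context image back to the engine's default state (leaving
+ * the per-context PPHWSP intact) and reapply our register defaults.
+ */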
+static void restore_default_state(struct intel_context *ce,
+				  struct intel_engine_cs *engine)
+{
+	u32 *regs = ce->lrc_reg_state;
+
+	if (engine->pinned_default_state)
+		memcpy(regs, /* skip restoring the vanilla PPHWSP */
+		       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+		       engine->context_size - PAGE_SIZE);
+
+	execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+}
+
+static void cancel_active(struct i915_request *rq,
+			  struct intel_engine_cs *engine)
+{
+	struct intel_context * const ce = rq->hw_context;
+
+	/*
+	 * The executing context has been cancelled. Fixup the context so that
+	 * it will be marked as incomplete [-EIO] upon resubmission and not