Since we process schedule-in of a context after submitting the request,
if we decide to reset the context at that time, we also have to cancel
the requests we have marked for submission.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 8 ++++----
 drivers/gpu/drm/i915/i915_request.c                  | 2 ++
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 1fae6c6f3868..fd7ac25605b2 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -219,14 +219,14 @@ active_request(const struct intel_timeline * const tl, struct i915_request *rq)
 {
 	struct i915_request *active = rq;
 
-	rcu_read_lock();
-	list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
+	list_for_each_entry_from_reverse(rq, &tl->requests, link) {
 		if (__i915_request_is_complete(rq))
 			break;
 
+		i915_request_set_error_once(rq, -EIO);
+		__i915_request_skip(rq);
 		active = rq;
 	}
-	rcu_read_unlock();
 
 	return active;
 }
@@ -487,7 +487,7 @@ static void reset_active(struct i915_request *rq,
 	 * remain correctly ordered. And we defer to __i915_request_submit()
 	 * so that all asynchronous waits are correctly handled.
 	 */
-	ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n",
+	ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
 		     rq->fence.context, rq->fence.seqno);
 
 	/* On resubmission of the active request, payload will be scrubbed */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 6578faf6eed8..ad3b6a4f424f 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -490,6 +490,8 @@ void __i915_request_skip(struct i915_request *rq)
 	if (rq->infix == rq->postfix)
 		return;
 
+	RQ_TRACE(rq, "error: %d\n", rq->fence.error);
+
 	/*
 	 * As this request likely depends on state from the lost
 	 * context, clear out all the user operations leaving the
--
2.20.1
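
For context on the mechanism: the new loop in active_request() walks backwards
along the timeline from the request being reset and, for every request that
has not yet completed, records -EIO once and scrubs its payload via
__i915_request_skip(), so requests already marked for submission are cancelled
rather than replayed after the reset. The stand-alone C sketch below
illustrates that pattern only; the fake_request type, the array-based timeline
and the fake_* helper names are invented for illustration and are not the i915
structures or API.

/*
 * Illustrative sketch (not i915 code): walk backwards from the reset
 * point, mark every incomplete request with -EIO and skip it, and
 * return the oldest incomplete request as the "active" one, mirroring
 * the loop added to active_request() above.
 */
#include <stdbool.h>
#include <stdio.h>

#define EIO 5

struct fake_request {
	unsigned long long seqno;	/* submission order */
	bool complete;			/* has the engine finished it? */
	int error;			/* fence error, 0 if none */
	bool skipped;			/* payload scrubbed? */
};

/* stand-in for i915_request_set_error_once() + __i915_request_skip() */
static void fake_skip(struct fake_request *rq)
{
	if (!rq->error)
		rq->error = -EIO;
	rq->skipped = true;
}

/*
 * Walk backwards from index idx (the request being reset), in the same
 * spirit as list_for_each_entry_from_reverse() over tl->requests.
 */
static struct fake_request *
fake_active_request(struct fake_request *tl, int idx)
{
	struct fake_request *active = &tl[idx];
	int i;

	for (i = idx; i >= 0; i--) {
		if (tl[i].complete)
			break;

		fake_skip(&tl[i]);
		active = &tl[i];
	}

	return active;
}

int main(void)
{
	struct fake_request tl[] = {
		{ .seqno = 1, .complete = true },
		{ .seqno = 2, .complete = false },	/* actually running */
		{ .seqno = 3, .complete = false },	/* submitted, not started */
		{ .seqno = 4, .complete = false },	/* submitted, not started */
	};
	struct fake_request *active = fake_active_request(tl, 3);
	unsigned int i;

	printf("active request: seqno=%llu\n", active->seqno);
	for (i = 0; i < sizeof(tl) / sizeof(tl[0]); i++)
		printf("seqno=%llu error=%d skipped=%d\n",
		       tl[i].seqno, tl[i].error, (int)tl[i].skipped);

	return 0;
}

Run as a normal user-space program this reports seqno 2 as the active request
and leaves requests 2-4 carrying -EIO with their payloads flagged as skipped,
which is analogous to what the reset path now does before resubmission.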