[PATCH] drm/i915: Check return value of i915_request_mark_eio before calling put

i915_request_mark_eio can return NULL, and in several places that return
value was passed unconditionally to i915_request_put. i915_request_put
in turn calls dma_fence_put on &rq->fence, and dma_fence_put checks for
NULL and short-circuits. This only happens to work because rq->fence is
the first member of the i915_request structure, so &rq->fence evaluates
to NULL when rq is NULL. Even though this works in practice, it is
rather dodgy (and relies on a member layout that could change), so be
safe and check the return value of i915_request_mark_eio before calling
i915_request_put.

Signed-off-by: Matthew Brost <matthew.brost@xxxxxxxxx>
---
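For reviewers, a minimal standalone sketch of why the old pattern only
happened to work. This is illustrative only; the structs and functions
below are simplified stand-ins for the real i915 definitions, not the
actual driver code:

struct dma_fence { int refcount; };

struct i915_request {
	struct dma_fence fence;	/* first member: offset 0 in the struct */
	int other_state;
};

static void dma_fence_put(struct dma_fence *fence)
{
	if (!fence)		/* NULL check short-circuits */
		return;
	/* ...drop the reference... */
}

/* Stand-in: pretend the request was already completed, so nothing to mark. */
static struct i915_request *i915_request_mark_eio(struct i915_request *rq)
{
	(void)rq;
	return NULL;
}

static void i915_request_put(struct i915_request *rq)
{
	/*
	 * With rq == NULL, &rq->fence is NULL plus the offset of the
	 * fence member. Only because fence is the first member (offset
	 * 0) does this evaluate to NULL and get caught by the check in
	 * dma_fence_put(); move fence later in the struct and this
	 * becomes a bogus non-NULL pointer. Common compilers make it
	 * work today, but it is not something to lean on.
	 */
	dma_fence_put(&rq->fence);
}

int main(void)
{
	struct i915_request *rq = NULL;

	/* Old pattern: "works" only by the accident described above. */
	i915_request_put(i915_request_mark_eio(rq));

	/* Pattern this patch switches to: explicit NULL check. */
	if (i915_request_mark_eio(rq))
		i915_request_put(rq);

	return 0;
}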
 drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 6 ++++--
 drivers/gpu/drm/i915/gt/intel_ring_submission.c      | 3 ++-
 drivers/gpu/drm/i915/gt/mock_engine.c                | 3 ++-
 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c    | 9 ++++++---
 4 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index ca03880fa7e4..fb7e64f78722 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -3073,7 +3073,8 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
-		i915_request_put(i915_request_mark_eio(rq));
+		if (i915_request_mark_eio(rq))
+			i915_request_put(rq);
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Flush the queued requests to the timeline list (for retiring). */
@@ -3093,7 +3094,8 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 
 	/* On-hold requests will be flushed to timeline upon their release */
 	list_for_each_entry(rq, &sched_engine->hold, sched.link)
-		i915_request_put(i915_request_mark_eio(rq));
+		if (i915_request_mark_eio(rq))
+			i915_request_put(rq);
 
 	/* Cancel all attached virtual engines */
 	while ((rb = rb_first_cached(&execlists->virtual))) {
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 586dca1731ce..fc73f8744758 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -419,7 +419,8 @@ static void reset_cancel(struct intel_engine_cs *engine)
 
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
-		i915_request_put(i915_request_mark_eio(request));
+		if (i915_request_mark_eio(request))
+			i915_request_put(request);
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 8b89215afe46..aae5e8dadd26 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -285,7 +285,8 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
 
 	/* Mark all submitted requests as skipped. */
 	list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
-		i915_request_put(i915_request_mark_eio(rq));
+		if (i915_request_mark_eio(rq))
+			i915_request_put(rq);
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Cancel and submit all pending requests. */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index d7e49f7c1dba..42bd2a8c6751 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -642,7 +642,8 @@ static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 	 * request resubmitted after the context was banned.
 	 */
 	if (unlikely(intel_context_is_banned(ce))) {
-		i915_request_put(i915_request_mark_eio(rq));
+		if (i915_request_mark_eio(rq))
+			i915_request_put(rq);
 		intel_engine_signal_breadcrumbs(ce->engine);
 		return 0;
 	}
@@ -1638,7 +1639,8 @@ static void guc_cancel_context_requests(struct intel_context *ce)
 	spin_lock_irqsave(&sched_engine->lock, flags);
 	spin_lock(&ce->guc_state.lock);
 	list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
-		i915_request_put(i915_request_mark_eio(rq));
+		if (i915_request_mark_eio(rq))
+			i915_request_put(rq);
 	spin_unlock(&ce->guc_state.lock);
 	spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
@@ -1679,7 +1681,8 @@ guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
 
 			__i915_request_submit(rq);
 
-			i915_request_put(i915_request_mark_eio(rq));
+			if (i915_request_mark_eio(rq))
+				i915_request_put(rq);
 		}
 
 		rb_erase_cached(&p->node, &sched_engine->queue);
-- 
2.33.1