[PATCH] drm/i915: Make i915_spin_request() static

No users remain outside of i915_wait_request(), so we can make it
private to i915_gem_request.c and have the caller pass in the seqno it
already knows. In the process, also remove i915_gem_request_started(),
as that was only ever used by i915_spin_request().

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Cc: Michal Winiarski <michal.winiarski@xxxxxxxxx>
Cc: Tvrtko Ursulin <tvrtko.ursulin@xxxxxxxxx>
Cc: Joonas Lahtinen <joonas.lahtinen@xxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_request.c | 27 +++++++++++++++++++------
 drivers/gpu/drm/i915/i915_gem_request.h | 35 ---------------------------------
 2 files changed, 21 insertions(+), 41 deletions(-)
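
For reference (a note below the fold, not part of the commit): the
started-check now open-coded at the top of __i915_spin_request() is
equivalent to the removed __i915_gem_request_started() helper. The
engine executes requests in order, so a request with global seqno N has
started once the engine's breadcrumb has passed N - 1, i.e. once the
preceding request has completed:

	/* Illustrative only; the patch folds this check directly
	 * into __i915_spin_request() rather than keeping a helper.
	 */
	static inline bool
	request_started(const struct drm_i915_gem_request *req, u32 seqno)
	{
		return i915_seqno_passed(intel_engine_get_seqno(req->engine),
					 seqno - 1);
	}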

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 289fb08acaf5..4eb1a76731b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -1021,12 +1021,28 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
 	return this_cpu != cpu;
 }
 
-bool __i915_spin_request(const struct drm_i915_gem_request *req,
-			 u32 seqno, int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct drm_i915_gem_request *req,
+				u32 seqno, int state, unsigned long timeout_us)
 {
 	struct intel_engine_cs *engine = req->engine;
 	unsigned int irq, cpu;
 
+	GEM_BUG_ON(!seqno);
+
+	/*
+	 * Only wait for the request if we know it is likely to complete.
+	 *
+	 * We don't track the timestamps around requests, nor the average
+	 * request length, so we do not have a good indicator that this
+	 * request will complete within the timeout. What we do know is the
+	 * order in which requests are executed by the engine and so we can
+	 * tell if the request has started. If the request hasn't started yet,
+	 * it is a fair assumption that it will not complete within our
+	 * relatively short timeout.
+	 */
+	if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1))
+		return false;
+
 	/* When waiting for high frequency requests, e.g. during synchronous
 	 * rendering split between the CPU and GPU, the finite amount of time
 	 * required to set up the irq and wait upon it limits the response
@@ -1040,8 +1056,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 	irq = atomic_read(&engine->irq_count);
 	timeout_us += local_clock_us(&cpu);
 	do {
-		if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
-				      seqno))
+		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
 			return seqno == i915_gem_request_global_seqno(req);
 
 		/* Seqno are meant to be ordered *before* the interrupt. If
@@ -1153,7 +1168,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 	GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
 
 	/* Optimistic short spin before touching IRQs */
-	if (i915_spin_request(req, state, 5))
+	if (__i915_spin_request(req, wait.seqno, state, 5))
 		goto complete;
 
 	set_current_state(state);
@@ -1210,7 +1225,7 @@ long i915_wait_request(struct drm_i915_gem_request *req,
 			continue;
 
 		/* Only spin if we know the GPU is processing this request */
-		if (i915_spin_request(req, state, 2))
+		if (__i915_spin_request(req, wait.seqno, state, 2))
 			break;
 
 		if (!intel_wait_check_request(&wait, req)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 49a4c8994ff0..96eb52471dad 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -312,26 +312,6 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
 	return (s32)(seq1 - seq2) >= 0;
 }
 
-static inline bool
-__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno)
-{
-	GEM_BUG_ON(!seqno);
-	return i915_seqno_passed(intel_engine_get_seqno(req->engine),
-				 seqno - 1);
-}
-
-static inline bool
-i915_gem_request_started(const struct drm_i915_gem_request *req)
-{
-	u32 seqno;
-
-	seqno = i915_gem_request_global_seqno(req);
-	if (!seqno)
-		return false;
-
-	return __i915_gem_request_started(req, seqno);
-}
-
 static inline bool
 __i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
 {
@@ -352,21 +332,6 @@ i915_gem_request_completed(const struct drm_i915_gem_request *req)
 	return __i915_gem_request_completed(req, seqno);
 }
 
-bool __i915_spin_request(const struct drm_i915_gem_request *request,
-			 u32 seqno, int state, unsigned long timeout_us);
-static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
-				     int state, unsigned long timeout_us)
-{
-	u32 seqno;
-
-	seqno = i915_gem_request_global_seqno(request);
-	if (!seqno)
-		return 0;
-
-	return (__i915_gem_request_started(request, seqno) &&
-		__i915_spin_request(request, seqno, state, timeout_us));
-}
-
 /* We treat requests as fences. This is not to be confused with our
  * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
  * We use the fences to synchronize access from the CPU with activity on the
-- 
2.14.1
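
As background on the optimistic spin itself: setting up and waiting on
an interrupt has a fixed cost, so for very short waits it is cheaper to
poll the breadcrumb for a bounded period and only then fall back to the
irq wait. A standalone sketch of that shape follows; seqno_passed()
mirrors i915_seqno_passed() from the patch, while read_seqno() and
now_us() are assumed stand-ins for the engine breadcrumb read and a
monotonic clock (none of this is driver code):

	#include <stdbool.h>
	#include <stdint.h>

	/* Wraparound-safe comparison, as i915_seqno_passed(): e.g.
	 * seqno_passed(2, 0xfffffffe) is true across the wrap, since
	 * (int32_t)(2 - 0xfffffffe) == 4 >= 0.
	 */
	static bool seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	/* Poll for completion for at most timeout_us; on failure the
	 * caller falls back to an interrupt-driven wait.
	 */
	static bool spin_for_seqno(uint32_t (*read_seqno)(void),
				   uint64_t (*now_us)(void),
				   uint32_t seqno,
				   unsigned long timeout_us)
	{
		uint64_t deadline = now_us() + timeout_us;

		/* Only spin once the request has started; an unstarted
		 * request is unlikely to finish in a short timeout.
		 */
		if (!seqno_passed(read_seqno(), seqno - 1))
			return false;

		do {
			if (seqno_passed(read_seqno(), seqno))
				return true; /* completed during the spin */
		} while (now_us() < deadline);

		return false; /* timed out; take the slow irq path */
	}

The driver version also stops early on pending signals, on migration to
another cpu and when the engine signals an interrupt; the sketch keeps
only the poll-then-fallback core.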
