[PATCH 18/18] drm/i915: Simplify calling engine->sync_to

Since requests can no longer be generated as a side-effect of
intel_ring_begin(), we know that the seqno will be unchanged during
ring-emission. This predictability means we do not have to check
for the seqno wrapping around whilst emitting the semaphore for
engine->sync_to().

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
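
For illustration only (not part of the patch; identifiers are taken from the
hunks below): with the seqno fixed at request allocation, the caller-side flow
in __i915_gem_object_sync() reduces to a straightforward compare, emit and
record sequence, roughly:

	/*
	 * Sketch of the post-patch inter-engine sync flow; it mirrors the
	 * hunks below rather than adding anything new.
	 */
	static int example_sync(struct drm_i915_gem_request *to,
				struct drm_i915_gem_request *from)
	{
		int idx = intel_engine_sync_index(from->engine, to->engine);
		int ret;

		/* Already ordered after an equal or later seqno from 'from'? */
		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
			return 0;

		/* Emit the semaphore wait into the waiter's ring. */
		ret = to->engine->semaphore.sync_to(to, from);
		if (ret)
			return ret;

		/* The seqno cannot wrap during emission, so record it directly. */
		from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
		return 0;
	}
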
 drivers/gpu/drm/i915/i915_drv.h         |  2 +-
 drivers/gpu/drm/i915/i915_gem.c         | 13 ++-----
 drivers/gpu/drm/i915/i915_gem_request.c |  9 +----
 drivers/gpu/drm/i915/intel_ringbuffer.c | 64 ++++++++++++---------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  5 ++-
 5 files changed, 30 insertions(+), 63 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f188c9a9b746..c374b8687d87 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1757,7 +1757,7 @@ struct drm_i915_private {
 	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs engine[I915_NUM_ENGINES];
 	struct drm_i915_gem_object *semaphore_obj;
-	uint32_t last_seqno, next_seqno;
+	u32 next_seqno;
 
 	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9fdecef34fa8..0b7a0e6f9dd1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2864,22 +2864,15 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		i915_gem_object_retire_request(obj, from);
 	} else {
 		int idx = intel_engine_sync_index(from->engine, to->engine);
-		u32 seqno = i915_gem_request_get_seqno(from);
-
-		if (seqno <= from->engine->semaphore.sync_seqno[idx])
+		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
 			return 0;
 
 		trace_i915_gem_ring_sync_to(to, from);
-		ret = to->engine->semaphore.sync_to(to, from->engine, seqno);
+		ret = to->engine->semaphore.sync_to(to, from);
 		if (ret)
 			return ret;
 
-		/* We use last_read_req because sync_to()
-		 * might have just caused seqno wrap under
-		 * the radar.
-		 */
-		from->engine->semaphore.sync_seqno[idx] =
-			i915_gem_request_get_seqno(obj->last_read_req[from->engine->id]);
+		from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 3e633b47213c..dfdb86c8a433 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -264,14 +264,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 	if (ret)
 		return ret;
 
-	/* Carefully set the last_seqno value so that wrap
-	 * detection still works
-	 */
 	dev_priv->next_seqno = seqno;
-	dev_priv->last_seqno = seqno - 1;
-	if (dev_priv->last_seqno == 0)
-		dev_priv->last_seqno--;
-
 	return 0;
 }
 
@@ -288,7 +281,7 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 		dev_priv->next_seqno = 1;
 	}
 
-	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+	*seqno = dev_priv->next_seqno++;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8ae25bcc876e..bfeb16025327 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1494,12 +1494,6 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
-					      u32 seqno)
-{
-	return dev_priv->last_seqno < seqno;
-}
-
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
@@ -1509,24 +1503,23 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
  */
 
 static int
-gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen8_ring_sync(struct drm_i915_gem_request *wait,
+	       struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = waiter_req->ring;
-	struct drm_i915_private *dev_priv = waiter_req->i915;
-	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
+	struct intel_ring *waiter = wait->ring;
+	struct drm_i915_private *dev_priv = wait->i915;
+	u64 offset = GEN8_WAIT_OFFSET(wait->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
-	ret = intel_ring_begin(waiter_req, 4);
+	ret = intel_ring_begin(wait, 4);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
 				MI_SEMAPHORE_GLOBAL_GTT |
 				MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter, signal->fence.seqno);
 	intel_ring_emit(waiter, lower_32_bits(offset));
 	intel_ring_emit(waiter, upper_32_bits(offset));
 	intel_ring_advance(waiter);
@@ -1536,48 +1529,37 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	 * We do this on the i915_switch_context() following the wait and
 	 * before the dispatch.
 	 */
-	ppgtt = waiter_req->ctx->ppgtt;
-	if (ppgtt && waiter_req->engine->id != RCS)
-		ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
+	ppgtt = wait->ctx->ppgtt;
+	if (ppgtt && wait->engine->id != RCS)
+		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
 	return 0;
 }
 
 static int
-gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen6_ring_sync(struct drm_i915_gem_request *wait,
+	       struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = waiter_req->ring;
+	struct intel_ring *waiter = wait->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
+	u32 wait_mbox = signal->engine->semaphore.mbox.wait[wait->engine->id];
 	int ret;
 
-	/* Throughout all of the GEM code, seqno passed implies our current
-	 * seqno is >= the last seqno executed. However for hardware the
-	 * comparison is strictly greater than.
-	 */
-	seqno -= 1;
-
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(waiter_req, 4);
+	ret = intel_ring_begin(wait, 4);
 	if (ret)
 		return ret;
 
-	/* If seqno wrap happened, omit the wait with no-ops */
-	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
-		intel_ring_emit(waiter, dw1 | wait_mbox);
-		intel_ring_emit(waiter, seqno);
-		intel_ring_emit(waiter, 0);
-		intel_ring_emit(waiter, MI_NOOP);
-	} else {
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-	}
+	intel_ring_emit(waiter, dw1 | wait_mbox);
+	/* Throughout all of the GEM code, seqno passed implies our current
+	 * seqno is >= the last seqno executed. However for hardware the
+	 * comparison is strictly greater than.
+	 */
+	intel_ring_emit(waiter, signal->fence.seqno - 1);
+	intel_ring_emit(waiter, 0);
+	intel_ring_emit(waiter, MI_NOOP);
 	intel_ring_advance(waiter);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 08e86204a3d5..65cb6adf26ca 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -275,9 +275,8 @@ struct intel_engine_cs {
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct drm_i915_gem_request *to_req,
-				   struct intel_engine_cs *from,
-				   u32 seqno);
+		int	(*sync_to)(struct drm_i915_gem_request *to,
+				   struct drm_i915_gem_request *from);
 		int	(*signal)(struct drm_i915_gem_request *signaller_req);
 	} semaphore;
 
-- 
2.8.1
