[PATCH 068/190] drm/i915: Unify adding requests between ringbuffer and execlists
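
Both the legacy ringbuffer and the execlists backends now emit their final
breadcrumb through a single engine->add_request() vfunc. Rename
gen8_emit_request() to gen8_add_request() and call it through the common path
in __i915_add_request(), move the hardware tail write (and the request->tail
bookkeeping) into each backend's add_request() implementation, and fold the
gen6 BSD tail workaround into a dedicated gen6_bsd_add_request(). With that,
the write_tail() and emit_request() vfuncs are no longer needed, and
emit_flush()/emit_bb_start() move into the common part of struct
intel_engine_cs.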

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_gem_request.c |   8 +-
 drivers/gpu/drm/i915/intel_lrc.c        |  14 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c | 129 +++++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  21 +++---
 4 files changed, 87 insertions(+), 85 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index ce663acc9c7d..01443d8d9224 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -434,13 +434,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->postfix = intel_ring_get_tail(ring);
 
-	if (i915.enable_execlists)
-		ret = request->engine->emit_request(request);
-	else {
-		ret = request->engine->add_request(request);
-
-		request->tail = intel_ring_get_tail(ring);
-	}
+	ret = request->engine->add_request(request);
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 30effca91184..9838503fafca 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -445,7 +445,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
 		if (req0->elsp_submitted) {
 			/*
 			 * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL
-			 * as we resubmit the request. See gen8_emit_request()
+			 * as we resubmit the request. See gen8_add_request()
 			 * for where we prepare the padding after the end of the
 			 * request.
 			 */
@@ -1588,7 +1588,7 @@ gen6_seqno_barrier(struct intel_engine_cs *ring)
 	intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static int gen8_emit_request(struct drm_i915_gem_request *request)
+static int gen8_add_request(struct drm_i915_gem_request *request)
 {
 	struct intel_ring *ring = request->ring;
 	u32 cmd;
@@ -1782,8 +1782,8 @@ static int logical_render_ring_init(struct drm_device *dev)
 	ring->init_context = gen8_init_rcs_context;
 	ring->cleanup = intel_fini_pipe_control;
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
-	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush_render;
+	ring->add_request = gen8_add_request;
 	ring->irq_enable = gen8_logical_ring_enable_irq;
 	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
@@ -1828,8 +1828,8 @@ static int logical_bsd_ring_init(struct drm_device *dev)
 
 	ring->init_hw = gen8_init_common_ring;
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
-	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
+	ring->add_request = gen8_add_request;
 	ring->irq_enable = gen8_logical_ring_enable_irq;
 	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
@@ -1852,8 +1852,8 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
 
 	ring->init_hw = gen8_init_common_ring;
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
-	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
+	ring->add_request = gen8_add_request;
 	ring->irq_enable = gen8_logical_ring_enable_irq;
 	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
@@ -1876,8 +1876,8 @@ static int logical_blt_ring_init(struct drm_device *dev)
 
 	ring->init_hw = gen8_init_common_ring;
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
-	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
+	ring->add_request = gen8_add_request;
 	ring->irq_enable = gen8_logical_ring_enable_irq;
 	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
@@ -1900,8 +1900,8 @@ static int logical_vebox_ring_init(struct drm_device *dev)
 
 	ring->init_hw = gen8_init_common_ring;
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
-	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
+	ring->add_request = gen8_add_request;
 	ring->irq_enable = gen8_logical_ring_enable_irq;
 	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04f0a77d49cf..556e9e2c1fec 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -59,13 +59,6 @@ int intel_ring_space(struct intel_ring *ringbuf)
 	return ringbuf->space;
 }
 
-static void __intel_ring_advance(struct intel_engine_cs *ring)
-{
-	struct intel_ring *ringbuf = ring->buffer;
-	ringbuf->tail &= ringbuf->size - 1;
-	ring->write_tail(ring, ringbuf->tail);
-}
-
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
@@ -418,13 +411,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 	return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-static void ring_write_tail(struct intel_engine_cs *ring,
-			    u32 value)
-{
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	I915_WRITE_TAIL(ring, value);
-}
-
 u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -533,7 +519,7 @@ static bool stop_ring(struct intel_engine_cs *ring)
 
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(ring, 0);
+	I915_WRITE_TAIL(ring, 0);
 
 	if (!IS_GEN2(ring->dev)) {
 		(void)I915_READ_CTL(ring);
@@ -1308,6 +1294,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_ring *ring = req->ring;
 	int ret;
 
@@ -1323,7 +1310,61 @@ gen6_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, req->fence.seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(req->engine);
+	intel_ring_advance(ring);
+
+	req->tail = intel_ring_get_tail(ring);
+	I915_WRITE_TAIL(req->engine, req->tail);
+
+	return 0;
+}
+
+static int
+gen6_bsd_add_request(struct drm_i915_gem_request *req)
+{
+	struct drm_i915_private *dev_priv = req->i915;
+	struct intel_ring *ring = req->ring;
+	int ret;
+
+	if (req->engine->semaphore.signal)
+		ret = req->engine->semaphore.signal(req, 4);
+	else
+		ret = intel_ring_begin(req, 4);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, req->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_advance(ring);
+
+       /* Every tail move must follow the sequence below */
+
+	/* Disable notification that the ring is IDLE. The GT
+	 * will then assume that it is busy and bring it out of rc6.
+	 */
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+
+	/* Clear the context id. Here be magic! */
+	I915_WRITE64(GEN6_BSD_RNCID, 0x0);
+
+	/* Wait for the ring not to be idle, i.e. for it to wake up. */
+	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+		      GEN6_BSD_SLEEP_INDICATOR) == 0,
+		     50))
+		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+
+	/* Now that the ring is fully powered up, update the tail */
+	req->tail = intel_ring_get_tail(ring);
+	I915_WRITE_TAIL(req->engine, req->tail);
+	POSTING_READ(RING_TAIL(req->engine->mmio_base));
+
+	/* Let the ring send IDLE messages to the GT again,
+	 * and so let it sleep to conserve power when idle.
+	 */
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
 
 	return 0;
 }
@@ -1423,6 +1464,7 @@ do {									\
 static int
 pc_render_add_request(struct drm_i915_gem_request *req)
 {
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_ring *ring = req->ring;
 	u32 addr = req->engine->status_page.gfx_addr +
 		(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
@@ -1467,7 +1509,10 @@ pc_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, addr | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, req->fence.seqno);
 	intel_ring_emit(ring, 0);
-	__intel_ring_advance(req->engine);
+	intel_ring_advance(ring);
+
+	req->tail = intel_ring_get_tail(ring);
+	I915_WRITE_TAIL(req->engine, req->tail);
 
 	return 0;
 }
@@ -1566,6 +1611,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_ring *ring = req->ring;
 	int ret;
 
@@ -1577,7 +1623,10 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, req->fence.seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_ring_advance(req->engine);
+	intel_ring_advance(ring);
+
+	req->tail = intel_ring_get_tail(ring);
+	I915_WRITE_TAIL(req->engine, req->tail);
 
 	return 0;
 }
@@ -2283,39 +2332,6 @@ void intel_engine_init_seqno(struct intel_engine_cs *ring, u32 seqno)
 	ring->hangcheck.seqno = seqno;
 }
 
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,
-				     u32 value)
-{
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-
-       /* Every tail move must follow the sequence below */
-
-	/* Disable notification that the ring is IDLE. The GT
-	 * will then assume that it is busy and bring it out of rc6.
-	 */
-	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-		   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
-
-	/* Clear the context id. Here be magic! */
-	I915_WRITE64(GEN6_BSD_RNCID, 0x0);
-
-	/* Wait for the ring not to be idle, i.e. for it to wake up. */
-	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
-		      GEN6_BSD_SLEEP_INDICATOR) == 0,
-		     50))
-		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
-
-	/* Now that the ring is fully powered up, update the tail */
-	I915_WRITE_TAIL(ring, value);
-	POSTING_READ(RING_TAIL(ring->mmio_base));
-
-	/* Let the ring send IDLE messages to the GT again,
-	 * and so let it sleep to conserve power when idle.
-	 */
-	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
-}
-
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
@@ -2575,7 +2591,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		}
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
-	ring->write_tail = ring_write_tail;
 
 	if (IS_HASWELL(dev))
 		ring->emit_bb_start = hsw_emit_bb_start;
@@ -2632,14 +2647,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	ring->name = "bsd ring";
 	ring->id = VCS;
 
-	ring->write_tail = ring_write_tail;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->mmio_base = GEN6_BSD_RING_BASE;
-		/* gen6 bsd needs a special wa for tail updates */
-		if (IS_GEN6(dev))
-			ring->write_tail = gen6_bsd_ring_write_tail;
 		ring->emit_flush = gen6_bsd_ring_flush;
+		/* gen6 bsd needs a special wa for tail updates */
 		ring->add_request = gen6_add_request;
+		if (IS_GEN6(dev))
+			ring->add_request = gen6_bsd_add_request;
 		ring->irq_seqno_barrier = gen6_seqno_barrier;
 		if (INTEL_INFO(dev)->gen >= 8) {
 			ring->irq_enable_mask =
@@ -2703,7 +2717,6 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	ring->name = "bsd2 ring";
 	ring->id = VCS2;
 
-	ring->write_tail = ring_write_tail;
 	ring->mmio_base = GEN8_BSD2_RING_BASE;
 	ring->emit_flush = gen6_bsd_ring_flush;
 	ring->add_request = gen6_add_request;
@@ -2732,7 +2745,6 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->id = BCS;
 
 	ring->mmio_base = BLT_RING_BASE;
-	ring->write_tail = ring_write_tail;
 	ring->emit_flush = gen6_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
@@ -2788,7 +2800,6 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->id = VECS;
 
 	ring->mmio_base = VEBOX_RING_BASE;
-	ring->write_tail = ring_write_tail;
 	ring->emit_flush = gen6_ring_flush;
 	ring->add_request = gen6_add_request;
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3a10376b896f..8147ce1379fb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -213,8 +213,15 @@ struct intel_engine_cs {
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	void		(*write_tail)(struct intel_engine_cs *ring,
-				      u32 value);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
+				      u32 invalidate_domains,
+				      u32 flush_domains);
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
+					 u64 offset, u32 length,
+					 unsigned dispatch_flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
 	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
@@ -290,16 +297,6 @@ struct intel_engine_cs {
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
-	int		(*emit_request)(struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct drm_i915_gem_request *request,
-				      u32 invalidate_domains,
-				      u32 flush_domains);
-	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, u32 length,
-					 unsigned dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
 
 	/**
 	 * List of objects currently involved in rendering from the
-- 
2.7.0.rc3
