[PATCH 030/190] drm/i915: Move the get/put irq locking into the caller

With only a single callsite for intel_engine_cs->irq_get and ->irq_put,
we can reduce the code size by moving the common preamble into the
caller, and we can also eliminate the reference counting.

For completeness, as we no longer reference count the irq, rename the
get/put vfunctions to enable/disable respectively.
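
As a reading aid, the resulting split between the single caller and a
per-engine backend looks roughly like the sketch below (condensed from
the hunks in this patch; other backends and unrelated setup omitted):

	/* intel_breadcrumbs.c: the sole caller now owns the locking */
	static void irq_enable(struct intel_engine_cs *engine)
	{
		spin_lock_irq(&engine->i915->irq_lock);
		engine->irq_enable(engine);	/* unconditional unmask */
		spin_unlock_irq(&engine->i915->irq_lock);
	}

	/* per-engine backend: no lock, no refcount, just the unmask */
	static void
	gen5_ring_enable_irq(struct intel_engine_cs *ring)
	{
		gen5_enable_gt_irq(ring->i915, ring->irq_enable_mask);
	}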

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/intel_breadcrumbs.c |   8 +-
 drivers/gpu/drm/i915/intel_lrc.c         |  53 ++----
 drivers/gpu/drm/i915/intel_ringbuffer.c  | 302 ++++++++++---------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h  |   5 +-
 4 files changed, 125 insertions(+), 243 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index cf9cbcc2d5d7..0ea01bd6811c 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -51,12 +51,16 @@ static void irq_enable(struct intel_engine_cs *engine)
 	 */
 	engine->irq_posted = true;
 
-	WARN_ON(!engine->irq_get(engine));
+	spin_lock_irq(&engine->i915->irq_lock);
+	engine->irq_enable(engine);
+	spin_unlock_irq(&engine->i915->irq_lock);
 }
 
 static void irq_disable(struct intel_engine_cs *engine)
 {
-	engine->irq_put(engine);
+	spin_lock_irq(&engine->i915->irq_lock);
+	engine->irq_disable(engine);
+	spin_unlock_irq(&engine->i915->irq_lock);
 
 	engine->irq_posted = false;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 27d91f1ceb2b..b1ede2e9b372 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1640,37 +1640,20 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
-		POSTING_READ(RING_IMR(ring->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	return true;
+	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
+	POSTING_READ(RING_IMR(ring->mmio_base));
 }
 
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
-		POSTING_READ(RING_IMR(ring->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	I915_WRITE_IMR(ring, ~ring->irq_keep_mask);
+	POSTING_READ(RING_IMR(ring->mmio_base));
 }
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1993,8 +1976,8 @@ static int logical_render_ring_init(struct drm_device *dev)
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush_render;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->irq_enable = gen8_logical_ring_enable_irq;
+	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
 
 	ring->dev = dev;
@@ -2039,8 +2022,8 @@ static int logical_bsd_ring_init(struct drm_device *dev)
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->irq_enable = gen8_logical_ring_enable_irq;
+	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
 
 	return logical_ring_init(dev, ring);
@@ -2063,8 +2046,8 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->irq_enable = gen8_logical_ring_enable_irq;
+	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
 
 	return logical_ring_init(dev, ring);
@@ -2087,8 +2070,8 @@ static int logical_blt_ring_init(struct drm_device *dev)
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->irq_enable = gen8_logical_ring_enable_irq;
+	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
 
 	return logical_ring_init(dev, ring);
@@ -2111,8 +2094,8 @@ static int logical_vebox_ring_init(struct drm_device *dev)
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
 	ring->emit_request = gen8_emit_request;
 	ring->emit_flush = gen8_emit_flush;
-	ring->irq_get = gen8_logical_ring_get_irq;
-	ring->irq_put = gen8_logical_ring_put_irq;
+	ring->irq_enable = gen8_logical_ring_enable_irq;
+	ring->irq_disable = gen8_logical_ring_disable_irq;
 	ring->emit_bb_start = gen8_emit_bb_start;
 
 	return logical_ring_init(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c86d0e17d785..5625f56a2db1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1503,109 +1503,56 @@ gen6_seqno_barrier(struct intel_engine_cs *ring)
 	intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *ring)
+static void
+gen5_ring_enable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0)
-		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	gen5_enable_gt_irq(ring->i915, ring->irq_enable_mask);
 }
 
 static void
-gen5_ring_put_irq(struct intel_engine_cs *ring)
+gen5_ring_disable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0)
-		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	gen5_disable_gt_irq(ring->i915, ring->irq_enable_mask);
 }
 
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *ring)
+static void
+i9xx_ring_enable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	return true;
+	dev_priv->irq_mask &= ~ring->irq_enable_mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
 }
 
 static void
-i9xx_ring_put_irq(struct intel_engine_cs *ring)
+i9xx_ring_disable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		dev_priv->irq_mask |= ring->irq_enable_mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	dev_priv->irq_mask |= ring->irq_enable_mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ(IMR);
 }
 
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *ring)
+static void
+i8xx_ring_enable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~ring->irq_enable_mask;
-		I915_WRITE16(IMR, dev_priv->irq_mask);
-		POSTING_READ16(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	return true;
+	dev_priv->irq_mask &= ~ring->irq_enable_mask;
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+	POSTING_READ16(IMR);
 }
 
 static void
-i8xx_ring_put_irq(struct intel_engine_cs *ring)
+i8xx_ring_disable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		dev_priv->irq_mask |= ring->irq_enable_mask;
-		I915_WRITE16(IMR, dev_priv->irq_mask);
-		POSTING_READ16(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	dev_priv->irq_mask |= ring->irq_enable_mask;
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+	POSTING_READ16(IMR);
 }
 
 static int
@@ -1645,128 +1592,77 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *ring)
+static void
+gen6_ring_enable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring,
-				       ~(ring->irq_enable_mask |
-					 GT_PARITY_ERROR(dev)));
-		else
-			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	return true;
+	if (HAS_L3_DPF(dev_priv) && ring->id == RCS)
+		I915_WRITE_IMR(ring,
+			       ~(ring->irq_enable_mask |
+				 GT_PARITY_ERROR(dev_priv)));
+	else
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+	gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
 }
 
 static void
-gen6_ring_put_irq(struct intel_engine_cs *ring)
+gen6_ring_disable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS)
-			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
-		else
-			I915_WRITE_IMR(ring, ~0);
-		gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	if (HAS_L3_DPF(dev_priv) && ring->id == RCS)
+		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev_priv));
+	else
+		I915_WRITE_IMR(ring, ~0);
+	gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
 }
 
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *ring)
+static void
+hsw_vebox_enable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	return true;
+	I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
+	gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
 }
 
 static void
-hsw_vebox_put_irq(struct intel_engine_cs *ring)
+hsw_vebox_disable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		I915_WRITE_IMR(ring, ~0);
-		gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	I915_WRITE_IMR(ring, ~0);
+	gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
 }
 
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *ring)
+static void
+gen8_ring_enable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (ring->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS) {
-			I915_WRITE_IMR(ring,
-				       ~(ring->irq_enable_mask |
-					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
-		} else {
-			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-		}
-		POSTING_READ(RING_IMR(ring->mmio_base));
+	if (HAS_L3_DPF(dev_priv) && ring->id == RCS) {
+		I915_WRITE_IMR(ring,
+			       ~(ring->irq_enable_mask |
+				 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+	} else {
+		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	POSTING_READ(RING_IMR(ring->mmio_base));
 }
 
 static void
-gen8_ring_put_irq(struct intel_engine_cs *ring)
+gen8_ring_disable_irq(struct intel_engine_cs *ring)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long flags;
+	struct drm_i915_private *dev_priv = ring->i915;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--ring->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev) && ring->id == RCS) {
-			I915_WRITE_IMR(ring,
-				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-		} else {
-			I915_WRITE_IMR(ring, ~0);
-		}
-		POSTING_READ(RING_IMR(ring->mmio_base));
+	if (HAS_L3_DPF(dev_priv) && ring->id == RCS) {
+		I915_WRITE_IMR(ring,
+			       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	} else {
+		I915_WRITE_IMR(ring, ~0);
 	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	POSTING_READ(RING_IMR(ring->mmio_base));
 }
 
 static int
@@ -2667,8 +2563,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->init_context = intel_rcs_ctx_init;
 		ring->add_request = gen6_add_request;
 		ring->flush = gen8_render_ring_flush;
-		ring->irq_get = gen8_ring_get_irq;
-		ring->irq_put = gen8_ring_put_irq;
+		ring->irq_enable = gen8_ring_enable_irq;
+		ring->irq_disable = gen8_ring_disable_irq;
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->irq_seqno_barrier = gen6_seqno_barrier;
 		if (i915_semaphore_is_enabled(dev)) {
@@ -2683,8 +2579,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			ring->flush = gen6_render_ring_flush;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable = gen6_ring_enable_irq;
+		ring->irq_disable = gen6_ring_disable_irq;
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		ring->irq_seqno_barrier = gen6_seqno_barrier;
 		if (i915_semaphore_is_enabled(dev)) {
@@ -2711,8 +2607,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	} else if (IS_GEN5(dev)) {
 		ring->add_request = pc_render_add_request;
 		ring->flush = gen4_render_ring_flush;
-		ring->irq_get = gen5_ring_get_irq;
-		ring->irq_put = gen5_ring_put_irq;
+		ring->irq_enable = gen5_ring_enable_irq;
+		ring->irq_disable = gen5_ring_disable_irq;
 		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
 					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
 	} else {
@@ -2722,11 +2618,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		else
 			ring->flush = gen4_render_ring_flush;
 		if (IS_GEN2(dev)) {
-			ring->irq_get = i8xx_ring_get_irq;
-			ring->irq_put = i8xx_ring_put_irq;
+			ring->irq_enable = i8xx_ring_enable_irq;
+			ring->irq_disable = i8xx_ring_disable_irq;
 		} else {
-			ring->irq_get = i9xx_ring_get_irq;
-			ring->irq_put = i9xx_ring_put_irq;
+			ring->irq_enable = i9xx_ring_enable_irq;
+			ring->irq_disable = i9xx_ring_disable_irq;
 		}
 		ring->irq_enable_mask = I915_USER_INTERRUPT;
 	}
@@ -2799,8 +2695,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		if (INTEL_INFO(dev)->gen >= 8) {
 			ring->irq_enable_mask =
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-			ring->irq_get = gen8_ring_get_irq;
-			ring->irq_put = gen8_ring_put_irq;
+			ring->irq_enable = gen8_ring_enable_irq;
+			ring->irq_disable = gen8_ring_disable_irq;
 			ring->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev)) {
@@ -2810,8 +2706,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			}
 		} else {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-			ring->irq_get = gen6_ring_get_irq;
-			ring->irq_put = gen6_ring_put_irq;
+			ring->irq_enable = gen6_ring_enable_irq;
+			ring->irq_disable = gen6_ring_disable_irq;
 			ring->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev)) {
@@ -2835,12 +2731,12 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->add_request = i9xx_add_request;
 		if (IS_GEN5(dev)) {
 			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
-			ring->irq_get = gen5_ring_get_irq;
-			ring->irq_put = gen5_ring_put_irq;
+			ring->irq_enable = gen5_ring_enable_irq;
+			ring->irq_disable = gen5_ring_disable_irq;
 		} else {
 			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-			ring->irq_get = i9xx_ring_get_irq;
-			ring->irq_put = i9xx_ring_put_irq;
+			ring->irq_enable = i9xx_ring_enable_irq;
+			ring->irq_disable = i9xx_ring_disable_irq;
 		}
 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
 	}
@@ -2867,8 +2763,8 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	ring->irq_seqno_barrier = gen6_seqno_barrier;
 	ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-	ring->irq_get = gen8_ring_get_irq;
-	ring->irq_put = gen8_ring_put_irq;
+	ring->irq_enable = gen8_ring_enable_irq;
+	ring->irq_disable = gen8_ring_disable_irq;
 	ring->dispatch_execbuffer =
 			gen8_ring_dispatch_execbuffer;
 	if (i915_semaphore_is_enabled(dev)) {
@@ -2897,8 +2793,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 8) {
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-		ring->irq_get = gen8_ring_get_irq;
-		ring->irq_put = gen8_ring_put_irq;
+		ring->irq_enable = gen8_ring_enable_irq;
+		ring->irq_disable = gen8_ring_disable_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
 			ring->semaphore.sync_to = gen8_ring_sync;
@@ -2907,8 +2803,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 		}
 	} else {
 		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
+		ring->irq_enable = gen6_ring_enable_irq;
+		ring->irq_disable = gen6_ring_disable_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
 			ring->semaphore.signal = gen6_signal;
@@ -2954,8 +2850,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 8) {
 		ring->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-		ring->irq_get = gen8_ring_get_irq;
-		ring->irq_put = gen8_ring_put_irq;
+		ring->irq_enable = gen8_ring_enable_irq;
+		ring->irq_disable = gen8_ring_disable_irq;
 		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
 			ring->semaphore.sync_to = gen8_ring_sync;
@@ -2964,8 +2860,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		}
 	} else {
 		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-		ring->irq_get = hsw_vebox_get_irq;
-		ring->irq_put = hsw_vebox_put_irq;
+		ring->irq_enable = hsw_vebox_enable_irq;
+		ring->irq_disable = hsw_vebox_disable_irq;
 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev)) {
 			ring->semaphore.sync_to = gen6_ring_sync;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ba81052999fa..3364bcebd456 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -202,11 +202,10 @@ struct  intel_engine_cs {
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
 
-	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	bool		irq_posted;
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
-	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
-	void		(*irq_put)(struct intel_engine_cs *ring);
+	void		(*irq_enable)(struct intel_engine_cs *ring);
+	void		(*irq_disable)(struct intel_engine_cs *ring);
 
 	int		(*init_hw)(struct intel_engine_cs *ring);
 
-- 
2.7.0.rc3

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx



