We were attempting to use a per-ring spinlock whilst modifying global
IRQ flags. A recipe for rare missed interrupts.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c |   41 ++++++++++++++++++-------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |    3 +--
 2 files changed, 25 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f797613..b7f446e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -610,17 +610,18 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -630,14 +631,15 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
@@ -645,17 +647,18 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -665,14 +668,15 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
@@ -680,17 +684,18 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -700,14 +705,15 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE16(IMR, dev_priv->irq_mask);
 		POSTING_READ16(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -786,6 +792,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
@@ -795,14 +802,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	 * blt/bsd rings on ivb. */
 	gen6_gt_force_wake_get(dev_priv);
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -812,15 +819,16 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		I915_WRITE_IMR(ring, ~0);
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	gen6_gt_force_wake_put(dev_priv);
 }
@@ -954,7 +962,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
-	spin_lock_init(&ring->irq_lock);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 06a66ad..e0b25bb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -56,8 +56,7 @@ struct intel_ring_buffer {
 	 */
 	u32		last_retired_head;
 
-	spinlock_t	irq_lock;
-	u32		irq_refcount;
+	u32		irq_refcount;	/* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		irq_seqno;		/* last seq seem at irq time */
 	u32		trace_irq_seqno;
--
1.7.10
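
A note on the pattern for readers not familiar with the code (illustrative
only, reduced from the hunks above rather than new driver code): the masks
being updated here (dev_priv->gt_irq_mask / dev_priv->irq_mask) are shared by
every ring, so the per-ring irq_lock never serialised the read-modify-write
against the other rings, and it did not exclude interrupt context either,
which is presumably how an enable bit could occasionally be lost and a waiter
left sleeping. The corrected shape of each get path, assuming (as the irqsave
variant implies) that dev_priv->irq_lock is also taken from interrupt
context, is:

	unsigned long flags;

	/* one device-wide lock, taken IRQ-safe because the same lock is
	 * used from the interrupt handler as well */
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		/* read-modify-write of a mask shared by all rings */
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);	/* flush the register write */
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

The put paths are the mirror image: the refcount decrement restores the mask
bit once the last waiter has gone, under the same global lock.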