From: Chris Wilson <chris at chris-wilson.co.uk>

We were attempting to use a per-ring spinlock whilst modifying global
IRQ flags. A recipe for rare missed interrupts.

Signed-off-by: Chris Wilson <chris at chris-wilson.co.uk>
Acked-by: Ben Widawsky <ben at bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter at ffwll.ch>

Conflicts:
	drivers/gpu/drm/i915/intel_ringbuffer.c
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 31 ++++++++++++++++++-------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 +--
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 12d9bc7..3b38157 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -613,17 +613,18 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -633,14 +634,15 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 static bool
@@ -648,17 +650,18 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -668,14 +671,15 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		dev_priv->irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(IMR, dev_priv->irq_mask);
 		POSTING_READ(IMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -754,6 +758,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
 	if (!dev->irq_enabled)
 		return false;
@@ -763,14 +768,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 	 * blt/bsd rings on ivb. */
 	gen6_gt_force_wake_get(dev_priv);
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	return true;
 }
@@ -780,15 +785,16 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	unsigned long flags;
 
-	spin_lock(&ring->irq_lock);
+	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
 		I915_WRITE_IMR(ring, ~0);
 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
 		POSTING_READ(GTIMR);
 	}
-	spin_unlock(&ring->irq_lock);
+	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
 	gen6_gt_force_wake_put(dev_priv);
 }
@@ -922,7 +928,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	ring->size = 32 * PAGE_SIZE;
 
 	init_waitqueue_head(&ring->irq_queue);
-	spin_lock_init(&ring->irq_lock);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 06a66ad..e0b25bb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -56,8 +56,7 @@ struct intel_ring_buffer {
 	 */
 	u32		last_retired_head;
 
-	spinlock_t	irq_lock;
-	u32		irq_refcount;
+	u32		irq_refcount;	/* protected by dev_priv->irq_lock */
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
 	u32		irq_seqno;		/* last seq seem at irq time */
 	u32		trace_irq_seqno;
-- 
1.7.10
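
[Illustrative note, not part of the patch: below is a minimal sketch of the
locking pattern the change moves to. Every name in it (demo_device,
demo_ring, demo_device_init, demo_ring_get_irq, demo_ring_put_irq) is
invented for the example and does not exist in the i915 code, and it only
builds in a kernel context. The point it shows is that state shared by all
rings -- the global interrupt enable mask -- is updated under one
device-wide spinlock, taken with spin_lock_irqsave() so it is safe whether
or not the caller already runs with interrupts disabled, instead of under a
per-ring lock that cannot serialise two rings touching the same mask.]

#include <linux/spinlock.h>
#include <linux/types.h>

/* One lock for the whole device, because the enable mask is global. */
struct demo_device {
	spinlock_t irq_lock;	/* protects irq_mask and every ring's refcount */
	u32 irq_mask;		/* bit set = source masked */
};

struct demo_ring {
	struct demo_device *dev;
	u32 irq_refcount;	/* protected by dev->irq_lock */
	u32 irq_enable_mask;	/* this ring's bit in the global mask */
};

static void demo_device_init(struct demo_device *dev)
{
	spin_lock_init(&dev->irq_lock);
	dev->irq_mask = ~0u;	/* everything masked until a ring asks for irqs */
}

static void demo_ring_get_irq(struct demo_ring *ring)
{
	struct demo_device *dev = ring->dev;
	unsigned long flags;

	/* irqsave/irqrestore: interrupts are disabled while we hold the lock
	 * and the previous state is restored afterwards, so this is safe from
	 * both process context and interrupt context. */
	spin_lock_irqsave(&dev->irq_lock, flags);
	if (ring->irq_refcount++ == 0)
		dev->irq_mask &= ~ring->irq_enable_mask;	/* unmask this ring */
	spin_unlock_irqrestore(&dev->irq_lock, flags);
}

static void demo_ring_put_irq(struct demo_ring *ring)
{
	struct demo_device *dev = ring->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->irq_lock, flags);
	if (--ring->irq_refcount == 0)
		dev->irq_mask |= ring->irq_enable_mask;		/* mask it again */
	spin_unlock_irqrestore(&dev->irq_lock, flags);
}

A per-ring lock only serialises users of the same ring; two rings doing the
read-modify-write of the shared mask at the same time can interleave and
one ring's unmask can be lost, which is the rare missed-interrupt scenario
the commit message refers to.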