Now that we have sensibly split things up, we can nicely get rid of the
ugly INTEL_INFO(dev)->gen >= 5 check in the i9xx ring irq functions.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c |   66 ++++++++++++++++++++----------
 1 files changed, 44 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6624a22..36dd660 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -639,6 +639,35 @@ i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 static bool
+gen5_ring_get_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (!dev->irq_enabled)
+		return false;
+
+	spin_lock(&ring->irq_lock);
+	if (ring->irq_refcount++ == 0)
+		ironlake_enable_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock(&ring->irq_lock);
+
+	return true;
+}
+
+static void
+gen5_ring_put_irq(struct intel_ring_buffer *ring)
+{
+	struct drm_device *dev = ring->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	spin_lock(&ring->irq_lock);
+	if (--ring->irq_refcount == 0)
+		ironlake_disable_irq(dev_priv, ring->irq_enable_mask);
+	spin_unlock(&ring->irq_lock);
+}
+
+static bool
 i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
@@ -648,13 +677,8 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
 		return false;
 
 	spin_lock(&ring->irq_lock);
-	if (ring->irq_refcount++ == 0) {
-		if (INTEL_INFO(dev)->gen >= 5)
-			ironlake_enable_irq(dev_priv,
-					    ring->irq_enable_mask);
-		else
-			i915_enable_irq(dev_priv, ring->irq_enable_mask);
-	}
+	if (ring->irq_refcount++ == 0)
+		i915_enable_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock(&ring->irq_lock);
 
 	return true;
@@ -667,13 +691,8 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock(&ring->irq_lock);
-	if (--ring->irq_refcount == 0) {
-		if (INTEL_INFO(dev)->gen >= 5)
-			ironlake_disable_irq(dev_priv,
-					     ring->irq_enable_mask);
-		else
-			i915_disable_irq(dev_priv, ring->irq_enable_mask);
-	}
+	if (--ring->irq_refcount == 0)
+		i915_disable_irq(dev_priv, ring->irq_enable_mask);
 	spin_unlock(&ring->irq_lock);
 }
 
@@ -1295,8 +1314,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
@@ -1336,8 +1355,8 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
 		ring->add_request = pc_render_add_request;
 		ring->flush = render_ring_flush;
 		ring->get_seqno = pc_render_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
+		ring->irq_get = gen5_ring_get_irq;
+		ring->irq_put = gen5_ring_put_irq;
 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
 	} else {
 		ring->add_request = render_ring_add_request;
@@ -1412,12 +1431,15 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->flush = bsd_ring_flush;
 		ring->add_request = ring_add_request;
 		ring->get_seqno = ring_get_seqno;
-		ring->irq_get = i9xx_ring_get_irq;
-		ring->irq_put = i9xx_ring_put_irq;
-		if (IS_GEN5(dev))
+		if (IS_GEN5(dev)) {
 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-		else
+			ring->irq_get = gen5_ring_get_irq;
+			ring->irq_put = gen5_ring_put_irq;
+		} else {
 			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
+			ring->irq_get = i9xx_ring_get_irq;
+			ring->irq_put = i9xx_ring_put_irq;
+		}
 		ring->dispatch_execbuffer = ring_dispatch_execbuffer;
 	}
 	ring->init = init_ring_common;
-- 
1.7.7.5
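
For context, callers never see this split: they keep going through the
ring->irq_get/irq_put vfuncs, which are now bound once at ring init
instead of re-checking the generation on every irq refcount transition.
A minimal sketch of the caller side, loosely modelled on the seqno wait
loop in i915 at the time; the plain wait_event and the exact wait
condition here are illustrative, the real code uses interruptible waits
with timeouts:

	/* Refcounted irq enable around a seqno wait: the first getter
	 * turns the ring interrupt on, the last putter turns it off
	 * again.  The vfuncs now dispatch to gen5_ring_*_irq or
	 * i9xx_ring_*_irq without any runtime generation check. */
	if (ring->irq_get(ring)) {
		wait_event(ring->irq_queue,
			   i915_seqno_passed(ring->get_seqno(ring), seqno));
		ring->irq_put(ring);
	}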