With only a single callsite for intel_engine_cs->irq_get and ->irq_put, we
can reduce the code size by moving the common preamble into the caller, and
we can also eliminate the reference counting.

For completeness, as we are no longer doing reference counting on irq,
rename the get/put vfunctions to enable/disable respectively.

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_irq.c          |   8 +-
 drivers/gpu/drm/i915/intel_breadcrumbs.c |   8 +-
 drivers/gpu/drm/i915/intel_lrc.c         |  34 +---
 drivers/gpu/drm/i915/intel_ringbuffer.c  | 269 ++++++++++---------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h  |   5 +-
 5 files changed, 106 insertions(+), 218 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index bcce52507b33..09d5de154aee 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
 	dev_priv->gt_irq_mask &= ~interrupt_mask;
 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-	POSTING_READ(GTIMR);
 }
 
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
 	ilk_update_gt_irq(dev_priv, mask, mask);
+	POSTING_READ_FW(GTIMR);
 }
 
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -2859,9 +2859,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
 }
 
 static bool
-ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
+ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
 {
-	if (INTEL_GEN(dev_priv) >= 8) {
+	if (INTEL_GEN(engine->i915) >= 8) {
 		return (ipehr >> 23) == 0x1c;
 	} else {
 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2932,7 +2932,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
 		return NULL;
 
 	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
-	if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
+	if (!ipehr_is_semaphore_wait(engine, ipehr))
 		return NULL;
 
 	/*
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 14fb9fdcde3a..475c454d11bf 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -51,12 +51,16 @@ static void irq_enable(struct intel_engine_cs *engine)
 	 */
 	engine->irq_posted = true;
 
-	WARN_ON(!engine->irq_get(engine));
+	spin_lock_irq(&engine->i915->irq_lock);
+	engine->irq_enable(engine);
+	spin_unlock_irq(&engine->i915->irq_lock);
 }
 
 static void irq_disable(struct intel_engine_cs *engine)
 {
-	engine->irq_put(engine);
+	spin_lock_irq(&engine->i915->irq_lock);
+	engine->irq_disable(engine);
+	spin_unlock_irq(&engine->i915->irq_lock);
 	engine->irq_posted = false;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8a4b3cd9623f..038040b54b08 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1582,36 +1582,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 	return 0;
 }
 
-static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		I915_WRITE_IMR(engine,
-			       ~(engine->irq_enable_mask | engine->irq_keep_mask));
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	I915_WRITE_IMR(engine,
+		       ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
-static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine)
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1899,8 +1881,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->init_hw = gen8_init_common_ring;
 	engine->emit_request = gen8_emit_request;
 	engine->emit_flush = gen8_emit_flush;
-	engine->irq_get = gen8_logical_ring_get_irq;
-	engine->irq_put = gen8_logical_ring_put_irq;
+	engine->irq_enable = gen8_logical_ring_enable_irq;
+	engine->irq_disable = gen8_logical_ring_disable_irq;
 	engine->emit_bb_start = gen8_emit_bb_start;
 	if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
 		engine->irq_seqno_barrier = bxt_a_seqno_barrier;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2624cc7184dc..65522c920883 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1567,103 +1567,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
 	spin_unlock_irq(&dev_priv->uncore.lock);
 }
 
-static bool
-gen5_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen5_ring_enable_irq(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0)
-		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
 static void
-gen5_ring_put_irq(struct intel_engine_cs *engine)
+gen5_ring_disable_irq(struct intel_engine_cs *engine)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0)
-		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
 }
 
-static bool
-i9xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i9xx_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~engine->irq_enable_mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	dev_priv->irq_mask &= ~engine->irq_enable_mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-i9xx_ring_put_irq(struct intel_engine_cs *engine)
+i9xx_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		dev_priv->irq_mask |= engine->irq_enable_mask;
-		I915_WRITE(IMR, dev_priv->irq_mask);
-		POSTING_READ(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	dev_priv->irq_mask |= engine->irq_enable_mask;
+	I915_WRITE(IMR, dev_priv->irq_mask);
 }
 
-static bool
-i8xx_ring_get_irq(struct intel_engine_cs *engine)
+static void
+i8xx_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (!intel_irqs_enabled(dev_priv))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		dev_priv->irq_mask &= ~engine->irq_enable_mask;
-		I915_WRITE16(IMR, dev_priv->irq_mask);
-		POSTING_READ16(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	dev_priv->irq_mask &= ~engine->irq_enable_mask;
+	I915_WRITE16(IMR, dev_priv->irq_mask);
+	POSTING_READ16(RING_IMR(engine->mmio_base));
 }
 
 static void
-i8xx_ring_put_irq(struct intel_engine_cs *engine)
+i8xx_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		dev_priv->irq_mask |= engine->irq_enable_mask;
-		I915_WRITE16(IMR, dev_priv->irq_mask);
-		POSTING_READ16(IMR);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	dev_priv->irq_mask |= engine->irq_enable_mask;
+	I915_WRITE16(IMR, dev_priv->irq_mask);
 }
 
 static int
@@ -1704,122 +1655,74 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static bool
-gen6_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen6_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
-			I915_WRITE_IMR(engine,
-				       ~(engine->irq_enable_mask |
-					 GT_PARITY_ERROR(dev_priv)));
-		else
-			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-		gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+		I915_WRITE_IMR(engine,
+			       ~(engine->irq_enable_mask |
+				 GT_PARITY_ERROR(dev_priv)));
+	else
+		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+	gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-gen6_ring_put_irq(struct intel_engine_cs *engine)
+gen6_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
-			I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
-		else
-			I915_WRITE_IMR(engine, ~0);
-		gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+		I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev_priv));
+	else
+		I915_WRITE_IMR(engine, ~0);
+	gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-hsw_vebox_get_irq(struct intel_engine_cs *engine)
+static void
+hsw_vebox_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-		gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+	gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
 static void
-hsw_vebox_put_irq(struct intel_engine_cs *engine)
+hsw_vebox_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		I915_WRITE_IMR(engine, ~0);
-		gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	I915_WRITE_IMR(engine, ~0);
+	gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
 }
 
-static bool
-gen8_ring_get_irq(struct intel_engine_cs *engine)
+static void
+gen8_ring_enable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
-
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return false;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (engine->irq_refcount++ == 0) {
-		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
-			I915_WRITE_IMR(engine,
-				       ~(engine->irq_enable_mask |
-					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
-		} else {
-			I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
-		}
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
-
-	return true;
+	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+		I915_WRITE_IMR(engine,
+			       ~(engine->irq_enable_mask |
+				 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
+	else
+		I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+	POSTING_READ_FW(RING_IMR(engine->mmio_base));
 }
 
 static void
-gen8_ring_put_irq(struct intel_engine_cs *engine)
+gen8_ring_disable_irq(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	unsigned long flags;
 
-	spin_lock_irqsave(&dev_priv->irq_lock, flags);
-	if (--engine->irq_refcount == 0) {
-		if (HAS_L3_DPF(dev_priv) && engine->id == RCS) {
-			I915_WRITE_IMR(engine,
-				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
-		} else {
-			I915_WRITE_IMR(engine, ~0);
-		}
-		POSTING_READ(RING_IMR(engine->mmio_base));
-	}
-	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+	if (HAS_L3_DPF(dev_priv) && engine->id == RCS)
+		I915_WRITE_IMR(engine,
+			       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+	else
+		I915_WRITE_IMR(engine, ~0);
 }
 
 static int
@@ -2688,8 +2591,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		engine->init_context = intel_rcs_ctx_init;
 		engine->add_request = gen8_render_add_request;
 		engine->flush = gen8_render_ring_flush;
-		engine->irq_get = gen8_ring_get_irq;
-		engine->irq_put = gen8_ring_put_irq;
+		engine->irq_enable = gen8_ring_enable_irq;
+		engine->irq_disable = gen8_ring_disable_irq;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			WARN_ON(!dev_priv->semaphore_obj);
@@ -2703,8 +2606,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		engine->flush = gen7_render_ring_flush;
 		if (INTEL_INFO(dev)->gen == 6)
 			engine->flush = gen6_render_ring_flush;
-		engine->irq_get = gen6_ring_get_irq;
-		engine->irq_put = gen6_ring_put_irq;
+		engine->irq_enable = gen6_ring_enable_irq;
+		engine->irq_disable = gen6_ring_disable_irq;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 		engine->irq_seqno_barrier = gen6_seqno_barrier;
 		if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2731,8 +2634,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	} else if (IS_GEN5(dev)) {
 		engine->add_request = i9xx_add_request;
 		engine->flush = gen4_render_ring_flush;
-		engine->irq_get = gen5_ring_get_irq;
-		engine->irq_put = gen5_ring_put_irq;
+		engine->irq_enable = gen5_ring_enable_irq;
+		engine->irq_disable = gen5_ring_disable_irq;
 		engine->irq_seqno_barrier = gen5_seqno_barrier;
 		engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
 	} else {
@@ -2742,11 +2645,11 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 		else
 			engine->flush = gen4_render_ring_flush;
 		if (IS_GEN2(dev)) {
-			engine->irq_get = i8xx_ring_get_irq;
-			engine->irq_put = i8xx_ring_put_irq;
+			engine->irq_enable = i8xx_ring_enable_irq;
+			engine->irq_disable = i8xx_ring_disable_irq;
 		} else {
-			engine->irq_get = i9xx_ring_get_irq;
-			engine->irq_put = i9xx_ring_put_irq;
+			engine->irq_enable = i9xx_ring_enable_irq;
+			engine->irq_disable = i9xx_ring_disable_irq;
 		}
 		engine->irq_enable_mask = I915_USER_INTERRUPT;
 	}
@@ -2821,8 +2724,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		if (INTEL_INFO(dev)->gen >= 8) {
 			engine->irq_enable_mask =
 				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-			engine->irq_get = gen8_ring_get_irq;
-			engine->irq_put = gen8_ring_put_irq;
+			engine->irq_enable = gen8_ring_enable_irq;
+			engine->irq_disable = gen8_ring_disable_irq;
 			engine->dispatch_execbuffer =
 				gen8_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2832,8 +2735,8 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 			}
 		} else {
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-			engine->irq_get = gen6_ring_get_irq;
-			engine->irq_put = gen6_ring_put_irq;
+			engine->irq_enable = gen6_ring_enable_irq;
+			engine->irq_disable = gen6_ring_disable_irq;
 			engine->dispatch_execbuffer =
 				gen6_ring_dispatch_execbuffer;
 			if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2857,13 +2760,13 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		engine->add_request = i9xx_add_request;
 		if (IS_GEN5(dev)) {
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
-			engine->irq_get = gen5_ring_get_irq;
-			engine->irq_put = gen5_ring_put_irq;
+			engine->irq_enable = gen5_ring_enable_irq;
+			engine->irq_disable = gen5_ring_disable_irq;
 			engine->irq_seqno_barrier = gen5_seqno_barrier;
 		} else {
 			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
-			engine->irq_get = i9xx_ring_get_irq;
-			engine->irq_put = i9xx_ring_put_irq;
+			engine->irq_enable = i9xx_ring_enable_irq;
+			engine->irq_disable = i9xx_ring_disable_irq;
 		}
 		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
 	}
@@ -2892,8 +2795,8 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 	engine->irq_seqno_barrier = gen6_seqno_barrier;
 	engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-	engine->irq_get = gen8_ring_get_irq;
-	engine->irq_put = gen8_ring_put_irq;
+	engine->irq_enable = gen8_ring_enable_irq;
+	engine->irq_disable = gen8_ring_disable_irq;
 	engine->dispatch_execbuffer =
 			gen8_ring_dispatch_execbuffer;
 	if (i915_semaphore_is_enabled(dev_priv)) {
@@ -2924,8 +2827,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 8) {
 		engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-		engine->irq_get = gen8_ring_get_irq;
-		engine->irq_put = gen8_ring_put_irq;
+		engine->irq_enable = gen8_ring_enable_irq;
+		engine->irq_disable = gen8_ring_disable_irq;
 		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen8_ring_sync;
@@ -2934,8 +2837,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 		}
 	} else {
 		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-		engine->irq_get = gen6_ring_get_irq;
-		engine->irq_put = gen6_ring_put_irq;
+		engine->irq_enable = gen6_ring_enable_irq;
+		engine->irq_disable = gen6_ring_disable_irq;
 		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.signal = gen6_signal;
@@ -2983,8 +2886,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	if (INTEL_INFO(dev)->gen >= 8) {
 		engine->irq_enable_mask =
 			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-		engine->irq_get = gen8_ring_get_irq;
-		engine->irq_put = gen8_ring_put_irq;
+		engine->irq_enable = gen8_ring_enable_irq;
+		engine->irq_disable = gen8_ring_disable_irq;
 		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen8_ring_sync;
@@ -2993,8 +2896,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 		}
 	} else {
 		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-		engine->irq_get = hsw_vebox_get_irq;
-		engine->irq_put = hsw_vebox_put_irq;
+		engine->irq_enable = hsw_vebox_enable_irq;
+		engine->irq_disable = hsw_vebox_disable_irq;
 		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
 		if (i915_semaphore_is_enabled(dev_priv)) {
 			engine->semaphore.sync_to = gen6_ring_sync;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8abc2b83a15e..df3dac727809 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -200,11 +200,10 @@ struct intel_engine_cs {
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
 
-	unsigned irq_refcount; /* protected by dev_priv->irq_lock */
 	bool		irq_posted;
 	u32		irq_enable_mask;	/* bitmask to enable ring interrupt */
-	bool __must_check (*irq_get)(struct intel_engine_cs *ring);
-	void		(*irq_put)(struct intel_engine_cs *ring);
+	void		(*irq_enable)(struct intel_engine_cs *ring);
+	void		(*irq_disable)(struct intel_engine_cs *ring);
 
 	int		(*init_hw)(struct intel_engine_cs *ring);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
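
As a minimal sketch of the resulting calling convention (restating the
irq_enable()/irq_disable() helpers from the intel_breadcrumbs.c hunk above,
with the irq_posted bookkeeping omitted): the single caller takes
dev_priv->irq_lock once, so the per-engine vfuncs only program the interrupt
mask registers and need no locking or reference counting of their own.

static void irq_enable(struct intel_engine_cs *engine)
{
	/* Common preamble, now in the one caller: serialise on the irq_lock. */
	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_enable(engine);	/* e.g. gen6_ring_enable_irq() */
	spin_unlock_irq(&engine->i915->irq_lock);
}

static void irq_disable(struct intel_engine_cs *engine)
{
	spin_lock_irq(&engine->i915->irq_lock);
	engine->irq_disable(engine);
	spin_unlock_irq(&engine->i915->irq_lock);
}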