2013/9/23 Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>:
> Register access is particularly obnoxious on Sandybridge and later due to
> the extra work we must do around every read or write. The effect is
> magnified on Haswell, as we have per-operation sanity checking
> magnifying the number of reads and writes.
>
> Interrupt handling is supposed to be fast, yet due to the sanity checks
> around the register access it is not as fast as it could be. If we look
> closer, most of the common register operations are reading values we
> already know, and redundant flushes. Eliminate these by storing the
> desired values rather than reading them back during the interrupt
> routine.
>
> Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
> ---
>  drivers/gpu/drm/i915/i915_drv.h |  4 ++--
>  drivers/gpu/drm/i915/i915_irq.c | 22 ++++++++--------------
>  2 files changed, 10 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index db8e4d0..f182a23 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1259,8 +1259,8 @@ typedef struct drm_i915_private {
>         /* DPIO indirect register protection */
>         struct mutex dpio_lock;
>
> -       /** Cached value of IMR to avoid reads in updating the bitfield */
> -       u32 irq_mask;
> +       /** Cached value of IMR/IER to avoid reads in updating the bitfield */
> +       u32 irq_mask, irq_enable;
>         u32 gt_irq_mask;
>         u32 pm_irq_mask;
>
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index d9eebca..76725c6 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -1428,7 +1428,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
>  {
>         struct drm_device *dev = (struct drm_device *) arg;
>         drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
> -       u32 de_iir, gt_iir, de_ier, sde_ier = 0;
> +       u32 de_iir, gt_iir;
>         irqreturn_t ret = IRQ_NONE;
>
>         atomic_inc(&dev_priv->irq_received);
> @@ -1438,20 +1438,15 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
>         intel_uncore_check_errors(dev);
>
>         /* disable master interrupt before clearing iir */
> -       de_ier = I915_READ(DEIER);
> -       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
> -       POSTING_READ(DEIER);
> +       I915_WRITE(DEIER, dev_priv->irq_enable & ~DE_MASTER_IRQ_CONTROL);
>
>         /* Disable south interrupts. We'll only write to SDEIIR once, so further
>          * interrupts will will be stored on its back queue, and then we'll be
>          * able to process them after we restore SDEIER (as soon as we restore
>          * it, we'll get an interrupt if SDEIIR still has something to process
>          * due to its back queue). */
> -       if (!HAS_PCH_NOP(dev)) {
> -               sde_ier = I915_READ(SDEIER);
> +       if (!HAS_PCH_NOP(dev))
>                 I915_WRITE(SDEIER, 0);
> -               POSTING_READ(SDEIER);
> -       }
>
>         gt_iir = I915_READ(GTIIR);
>         if (gt_iir) {
> @@ -1482,12 +1477,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
>                 }
>         }
>
> -       I915_WRITE(DEIER, de_ier);
> +       if (!HAS_PCH_NOP(dev))
> +               I915_WRITE(SDEIER, ~0);

Please don't change the relative order of the DEIER and SDEIER writes here.
As far as I remember, this order is required to prevent SDEIER interrupts
from stopping completely when we're getting too many of them (e.g.,
underruns).
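Something along these lines would keep the cached IER value from this patch
but preserve the original ordering (just a rough sketch, untested):

        /* Re-enable the master interrupt control first... */
        I915_WRITE(DEIER, dev_priv->irq_enable);
        POSTING_READ(DEIER);

        /* ...and only then restore SDEIER, so that anything still queued
         * behind SDEIIR can raise a fresh interrupt through the now
         * re-enabled master control. */
        if (!HAS_PCH_NOP(dev))
                I915_WRITE(SDEIER, ~0);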
> +       I915_WRITE(DEIER, dev_priv->irq_enable);
>         POSTING_READ(DEIER);
> -       if (!HAS_PCH_NOP(dev)) {
> -               I915_WRITE(SDEIER, sde_ier);
> -               POSTING_READ(SDEIER);
> -       }
>
>         return ret;
>  }
> @@ -2343,11 +2336,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
>         }
>
>         dev_priv->irq_mask = ~display_mask;
> +       dev_priv->irq_enable = display_mask | extra_mask;
>
>         /* should always can generate irq */
>         I915_WRITE(DEIIR, I915_READ(DEIIR));
>         I915_WRITE(DEIMR, dev_priv->irq_mask);
> -       I915_WRITE(DEIER, display_mask | extra_mask);
> +       I915_WRITE(DEIER, dev_priv->irq_enable);
>         POSTING_READ(DEIER);
>
>         gen5_gt_irq_postinstall(dev);
> --
> 1.8.4.rc3

--
Paulo Zanoni

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx