Replace the for_each_ring() macro with for_each_engine() wherever the
third (index) argument is (or can be made) unused. This often allows us
to delete the associated local variable as well.

Cc: Daniel Vetter <daniel@xxxxxxxx>
Cc: Jani Nikula <jani.nikula@xxxxxxxxx>
Cc: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Dave Gordon <david.s.gordon@xxxxxxxxx>
---
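For reviewers: both iterators walk the same per-device engine array; the
only difference is that for_each_engine() keeps the array index internal,
so callers no longer need a scratch loop variable. A simplified sketch of
the idea (illustrative only, not the exact in-tree definitions):

/* Old style: the caller must supply an index variable i__. */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), \
		    intel_ring_initialized(ring__))

/* New style: iterate by pointer, no index visible to the caller. */
/* (The real macros also guard the if so a following else cannot bind to it.) */
#define for_each_engine(ring__, dev_priv__) \
	for ((ring__) = &(dev_priv__)->ring[0]; \
	     (ring__) < &(dev_priv__)->ring[I915_NUM_RINGS]; \
	     (ring__)++) \
		if (intel_ring_initialized(ring__))

With the second form, every caller that only used the index to feed the
macro can drop its local "int i;" (or "unused"/"j"), which is what the
hunks below do.
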
 drivers/gpu/drm/i915/i915_debugfs.c     | 37 +++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem.c         | 45 ++++++++++++++------------------
 drivers/gpu/drm/i915/i915_gem_context.c |  6 ++---
 drivers/gpu/drm/i915/i915_gem_debug.c   |  3 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  9 +++----
 drivers/gpu/drm/i915/i915_irq.c         | 14 ++++------
 drivers/gpu/drm/i915/intel_guc_loader.c |  8 +++---
 drivers/gpu/drm/i915/intel_lrc.c        |  3 +--
 drivers/gpu/drm/i915/intel_pm.c         | 19 +++++---------
 9 files changed, 60 insertions(+), 84 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0fc38bb..dd607c3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -400,11 +400,11 @@ static void print_batch_pool_stats(struct seq_file *m,
 	struct drm_i915_gem_object *obj;
 	struct file_stats stats;
 	struct intel_engine_cs *ring;
-	int i, j;
+	int j;
 
 	memset(&stats, 0, sizeof(stats));
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
 			list_for_each_entry(obj,
 					    &ring->batch_pool.cache_list[j],
@@ -641,13 +641,13 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_object *obj;
 	struct intel_engine_cs *ring;
 	int total = 0;
-	int ret, i, j;
+	int ret, j;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) {
 			int count;
 
@@ -685,14 +685,14 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	struct drm_i915_gem_request *req;
-	int ret, any, i;
+	int ret, any;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
 	any = 0;
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		int count;
 
 		count = 0;
@@ -742,14 +742,14 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int ret, i;
+	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 	intel_runtime_pm_get(dev_priv);
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		i915_ring_seqno_info(m, ring);
 
 	intel_runtime_pm_put(dev_priv);
@@ -931,7 +931,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		seq_printf(m, "Graphics Interrupt mask: %08x\n",
 			   I915_READ(GTIMR));
 	}
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		if (INTEL_INFO(dev)->gen >= 6) {
 			seq_printf(m,
 				   "Graphics Interrupt mask (%s): %08x\n",
@@ -1942,7 +1942,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
-		for_each_ring(ring, dev_priv, i) {
+		for_each_engine(ring, dev_priv) {
 			if (ring->default_context == ctx)
 				seq_printf(m, "(default context %s) ",
 					   ring->name);
@@ -2237,12 +2237,12 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-	int unused, i;
+	int i;
 
 	if (!ppgtt)
 		return;
 
-	for_each_ring(ring, dev_priv, unused) {
+	for_each_engine(ring, dev_priv) {
 		seq_printf(m, "%s\n", ring->name);
 		for (i = 0; i < 4; i++) {
 			u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i));
@@ -2257,12 +2257,11 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int i;
 
 	if (INTEL_INFO(dev)->gen == 6)
 		seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		seq_printf(m, "%s\n", ring->name);
 		if (INTEL_INFO(dev)->gen == 7)
 			seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring)));
@@ -2325,9 +2324,8 @@ static int count_irq_waiters(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *ring;
 	int count = 0;
-	int i;
 
-	for_each_ring(ring, i915, i)
+	for_each_engine(ring, i915)
 		count += ring->irq_refcount;
 
 	return count;
@@ -3159,7 +3157,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 		kunmap_atomic(seqno);
 	} else {
 		seq_puts(m, " Last signal:");
-		for_each_ring(ring, dev_priv, i)
+		for_each_engine(ring, dev_priv)
 			for (j = 0; j < num_rings; j++)
 				seq_printf(m, "0x%08x\n",
 					   I915_READ(ring->semaphore.mbox.signal[j]));
@@ -3167,10 +3165,9 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
 	}
 
 	seq_puts(m, "\nSync seqno:\n");
-	for_each_ring(ring, dev_priv, i) {
-		for (j = 0; j < num_rings; j++) {
+	for_each_engine(ring, dev_priv) {
+		for (j = 0; j < num_rings; j++)
 			seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]);
-		}
 		seq_putc(m, '\n');
 	}
 	seq_putc(m, '\n');
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6c60e04..1766f9d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2466,10 +2466,10 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int ret, i, j;
+	int ret, j;
 
 	/* Carefully retire all requests without writing to the rings */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		ret = intel_ring_idle(ring);
 		if (ret)
 			return ret;
@@ -2477,7 +2477,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
 	i915_gem_retire_requests(dev);
 
 	/* Finally reset hw state */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		intel_ring_init_seqno(ring, seqno);
 
 		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
@@ -2860,17 +2860,16 @@ void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int i;
 
 	/*
 	 * Before we free the objects from the requests, we need to inspect
 	 * them for finding the guilty party. As the requests only borrow
 	 * their reference to the objects, the inspection must be done first.
 	 */
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		i915_gem_reset_ring_status(dev_priv, ring);
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
 	i915_gem_context_reset(dev);
@@ -2938,9 +2937,8 @@ i915_gem_retire_requests(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	bool idle = true;
-	int i;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		i915_gem_retire_requests_ring(ring);
 		idle &= list_empty(&ring->request_list);
 		if (i915.enable_execlists) {
@@ -2988,9 +2986,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
 		container_of(work, typeof(*dev_priv), mm.idle_work.work);
 	struct drm_device *dev = dev_priv->dev;
 	struct intel_engine_cs *ring;
-	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		if (!list_empty(&ring->request_list))
 			return;
 
@@ -3001,12 +2998,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	intel_mark_idle(dev);
 
 	if (mutex_trylock(&dev->struct_mutex)) {
-		struct intel_engine_cs *ring;
-		int i;
-
-		for_each_ring(ring, dev_priv, i)
+		for_each_engine(ring, dev_priv)
 			i915_gem_batch_pool_fini(&ring->batch_pool);
-
 		mutex_unlock(&dev->struct_mutex);
 	}
 }
@@ -3365,10 +3358,10 @@ int i915_gpu_idle(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int ret, i;
+	int ret;
 
 	/* Flush everything onto the inactive list. */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		if (!i915.enable_execlists) {
 			struct drm_i915_gem_request *req;
 
@@ -4622,9 +4615,8 @@ i915_gem_stop_ringbuffers(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		dev_priv->gt.stop_ring(ring);
 }
 
@@ -4795,7 +4787,7 @@ i915_gem_init_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int ret, i, j;
+	int ret, j;
 
 	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
 		return -EIO;
@@ -4841,7 +4833,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	}
 
 	/* Need to do basic initialisation of all rings first: */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		ret = ring->init_hw(ring);
 		if (ret)
 			goto out;
@@ -4866,7 +4858,7 @@ i915_gem_init_hw(struct drm_device *dev)
 		goto out;
 
 	/* Now it is safe to go back round and do everything else: */
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		struct drm_i915_gem_request *req;
 
 		WARN_ON(!ring->default_context);
@@ -4884,7 +4876,8 @@ i915_gem_init_hw(struct drm_device *dev)
 
 		ret = i915_ppgtt_init_ring(req);
 		if (ret && ret != -EIO) {
-			DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
+			DRM_ERROR("PPGTT enable %s failed %d\n",
+				  ring->name, ret);
 			i915_gem_request_cancel(req);
 			i915_gem_cleanup_ringbuffer(dev);
 			goto out;
@@ -4892,7 +4885,8 @@ i915_gem_init_hw(struct drm_device *dev)
 
 		ret = i915_gem_context_enable(req);
 		if (ret && ret != -EIO) {
-			DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
+			DRM_ERROR("Context enable %s failed %d\n",
+				  ring->name, ret);
 			i915_gem_request_cancel(req);
 			i915_gem_cleanup_ringbuffer(dev);
 			goto out;
@@ -4973,9 +4967,8 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		dev_priv->gt.cleanup_ring(ring);
 
 	if (i915.enable_execlists)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 900ffd0..ff66a94 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -522,7 +522,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		i915_semaphore_is_enabled(ring->dev) ?
 		hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
 		0;
-	int len, i, ret;
+	int len, ret;
 
 	/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
 	 * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -557,7 +557,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		struct intel_engine_cs *signaller;
 
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-		for_each_ring(signaller, to_i915(ring->dev), i) {
+		for_each_engine(signaller, to_i915(ring->dev)) {
 			if (signaller == ring)
 				continue;
 
@@ -582,7 +582,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 		struct intel_engine_cs *signaller;
 
 		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
-		for_each_ring(signaller, to_i915(ring->dev), i) {
+		for_each_engine(signaller, to_i915(ring->dev)) {
 			if (signaller == ring)
 				continue;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 17299d0..12bace9 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -38,12 +38,11 @@ i915_verify_lists(struct drm_device *dev)
 	struct drm_i915_gem_object *obj;
 	struct intel_engine_cs *ring;
 	int err = 0;
-	int i;
 
 	if (warned)
 		return 0;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) {
 			if (obj->base.dev != dev ||
 			    !atomic_read(&obj->base.refcount.refcount)) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 56f4f2e..0b0beaf 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1735,9 +1735,8 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int j;
 
-	for_each_ring(ring, dev_priv, j) {
+	for_each_engine(ring, dev_priv) {
 		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
 		I915_WRITE(RING_MODE_GEN7(ring),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
@@ -1749,7 +1748,6 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	uint32_t ecochk, ecobits;
-	int i;
 
 	ecobits = I915_READ(GAC_ECO_BITS);
 	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
@@ -1763,7 +1761,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
 	}
 	I915_WRITE(GAM_ECOCHK, ecochk);
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		/* GFX_MODE is per-ring on gen7+ */
 		I915_WRITE(RING_MODE_GEN7(ring),
 			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
@@ -2264,12 +2262,11 @@ void i915_check_and_clear_faults(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int i;
 
 	if (INTEL_INFO(dev)->gen < 6)
 		return;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		u32 fault_reg;
 		fault_reg = I915_READ(RING_FAULT_REG(ring));
 		if (fault_reg & RING_FAULT_VALID) {
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3f8c753..622e9a4 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1080,9 +1080,8 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
 static bool any_waiters(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *ring;
-	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		if (ring->irq_refcount)
 			return true;
 
@@ -2432,7 +2431,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 			       bool reset_completed)
 {
 	struct intel_engine_cs *ring;
-	int i;
 
 	/*
 	 * Notify all waiters for GPU completion events that reset state has
@@ -2442,7 +2440,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
 	 */
 
 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		wake_up_all(&ring->irq_queue);
 
 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
@@ -2810,10 +2808,9 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 of
 {
 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	struct intel_engine_cs *signaller;
-	int i;
 
 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
-		for_each_ring(signaller, dev_priv, i) {
+		for_each_engine(signaller, dev_priv) {
 			if (ring == signaller)
 				continue;
 
@@ -2823,7 +2820,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 of
 	} else {
 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
 
-		for_each_ring(signaller, dev_priv, i) {
+		for_each_engine(signaller, dev_priv) {
 			if(ring == signaller)
 				continue;
 
@@ -2939,9 +2936,8 @@ static int semaphore_passed(struct intel_engine_cs *ring)
 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *ring;
-	int i;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		ring->hangcheck.deadlock = 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 550921f..2b34074a 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -82,12 +82,12 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
 static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *ring;
-	int i, irqs;
+	int irqs;
 
 	/* tell all command streamers NOT to forward interrupts and vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
 	irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		I915_WRITE(RING_MODE_GEN7(ring), irqs);
 
 	/* route all GT interrupts to the host */
@@ -99,12 +99,12 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
 static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *ring;
-	int i, irqs;
+	int irqs;
 
 	/* tell all command streamers to forward interrupts and vblank to GuC */
 	irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS);
 	irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		I915_WRITE(RING_MODE_GEN7(ring), irqs);
 
 	/* route USER_INTERRUPT to Host, all others are sent to GuC. */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ca5c0e8..3a865a0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2516,9 +2516,8 @@ void intel_lr_context_reset(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	int i;
 
-	for_each_ring(ring, dev_priv, i) {
+	for_each_engine(ring, dev_priv) {
 		struct drm_i915_gem_object *ctx_obj =
 				ctx->engine[ring->id].state;
 		struct intel_ringbuffer *ringbuf =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 02fe081..af7644b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4657,7 +4657,6 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	uint32_t rc6_mask = 0;
-	int unused;
 
 	/* 1a: Software RC state - RC0 */
 	I915_WRITE(GEN6_RC_STATE, 0);
@@ -4678,7 +4677,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(ring, dev_priv, unused)
+	for_each_engine(ring, dev_priv)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
 	if (HAS_GUC_UCODE(dev))
@@ -4728,7 +4727,6 @@ static void gen8_enable_rps(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	uint32_t rc6_mask = 0;
-	int unused;
 
 	/* 1a: Software RC state - RC0 */
 	I915_WRITE(GEN6_RC_STATE, 0);
@@ -4747,7 +4745,7 @@ static void gen8_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-	for_each_ring(ring, dev_priv, unused)
+	for_each_engine(ring, dev_priv)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 	if (IS_BROADWELL(dev))
@@ -4812,7 +4810,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
 	u32 gtfifodbg;
 	int rc6_mode;
-	int i, ret;
+	int ret;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
@@ -4844,7 +4842,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -5339,7 +5337,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	u32 gtfifodbg, val, rc6_mode = 0, pcbr;
-	int i;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
@@ -5364,7 +5361,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 	I915_WRITE(GEN6_RC_SLEEP, 0);
 
@@ -5437,7 +5434,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
 	u32 gtfifodbg, val, rc6_mode = 0;
-	int i;
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
@@ -5475,7 +5471,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
 
 	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
@@ -5854,14 +5850,13 @@ bool i915_gpu_busy(void)
 	struct drm_i915_private *dev_priv;
 	struct intel_engine_cs *ring;
 	bool ret = false;
-	int i;
 
 	spin_lock_irq(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
 
-	for_each_ring(ring, dev_priv, i)
+	for_each_engine(ring, dev_priv)
 		ret |= !list_empty(&ring->request_list);
 
 out_unlock:
-- 
1.9.1