Having provided for_each_engine_id() for cases where the third (id) argument is useful, we can now replace all the remaining instances with a simpler version that takes only two parameters. In many cases, this also allows the elimination of the local variable used in the iterator (usually 'i'). Signed-off-by: Dave Gordon <david.s.gordon@xxxxxxxxx> --- drivers/gpu/drm/i915/i915_debugfs.c | 50 +++++++++++++----------------- drivers/gpu/drm/i915/i915_drv.h | 17 ++++++---- drivers/gpu/drm/i915/i915_gem.c | 50 +++++++++++++----------------- drivers/gpu/drm/i915/i915_gem_context.c | 6 ++-- drivers/gpu/drm/i915/i915_gem_debug.c | 3 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 9 ++---- drivers/gpu/drm/i915/i915_guc_submission.c | 6 ++-- drivers/gpu/drm/i915/i915_irq.c | 14 +++------ drivers/gpu/drm/i915/intel_guc_loader.c | 8 ++--- drivers/gpu/drm/i915/intel_lrc.c | 3 +- drivers/gpu/drm/i915/intel_pm.c | 19 +++++------- 11 files changed, 82 insertions(+), 103 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 77dce52..d02f8ce 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -398,11 +398,11 @@ static void print_batch_pool_stats(struct seq_file *m, struct drm_i915_gem_object *obj; struct file_stats stats; struct intel_engine_cs *engine; - int i, j; + int j; memset(&stats, 0, sizeof(stats)); - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { list_for_each_entry(obj, &engine->batch_pool.cache_list[j], @@ -638,13 +638,13 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data) struct drm_i915_gem_object *obj; struct intel_engine_cs *engine; int total = 0; - int ret, i, j; + int ret, j; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { int count; @@ -682,14 +682,14 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; struct drm_i915_gem_request *req; - int ret, any, i; + int ret, any; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; any = 0; - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { int count; count = 0; @@ -739,14 +739,14 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int ret, i; + int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; intel_runtime_pm_get(dev_priv); - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) i915_ring_seqno_info(m, engine); intel_runtime_pm_put(dev_priv); @@ -933,7 +933,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "Graphics Interrupt mask: %08x\n", I915_READ(GTIMR)); } - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { if (INTEL_INFO(dev)->gen >= 6) { seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", @@ -2044,7 +2044,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; struct intel_context *ctx; - int ret, i; + int ret; if (!i915.enable_execlists) { seq_printf(m, "Logical Ring Contexts are 
disabled\n"); @@ -2057,7 +2057,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) list_for_each_entry(ctx, &dev_priv->context_list, link) if (ctx != dev_priv->kernel_context) - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) i915_dump_lrc_obj(m, ctx, engine); mutex_unlock(&dev->struct_mutex); @@ -2077,8 +2077,7 @@ static int i915_execlists(struct seq_file *m, void *data) u32 status; u32 ctx_id; struct list_head *cursor; - int ring_id, i; - int ret; + int i, ret; if (!i915.enable_execlists) { seq_puts(m, "Logical Ring Contexts are disabled\n"); @@ -2091,7 +2090,7 @@ static int i915_execlists(struct seq_file *m, void *data) intel_runtime_pm_get(dev_priv); - for_each_engine(engine, dev_priv, ring_id) { + for_each_engine(engine, dev_priv) { struct drm_i915_gem_request *head_req = NULL; int count = 0; unsigned long flags; @@ -2250,12 +2249,12 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - int unused, i; + int i; if (!ppgtt) return; - for_each_engine(engine, dev_priv, unused) { + for_each_engine(engine, dev_priv) { seq_printf(m, "%s\n", engine->name); for (i = 0; i < 4; i++) { u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); @@ -2270,12 +2269,11 @@ static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int i; if (INTEL_INFO(dev)->gen == 6) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { seq_printf(m, "%s\n", engine->name); if (INTEL_INFO(dev)->gen == 7) seq_printf(m, "GFX_MODE: 0x%08x\n", @@ -2342,9 +2340,8 @@ static int count_irq_waiters(struct drm_i915_private *i915) { struct intel_engine_cs *engine; int count = 0; - int i; - for_each_engine(engine, i915, i) + for_each_engine(engine, i915) count += engine->irq_refcount; return count; @@ -2455,7 +2452,6 @@ static void i915_guc_client_info(struct seq_file *m, { struct intel_engine_cs *engine; uint64_t tot = 0; - uint32_t i; seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", client->priority, client->ctx_index, client->proc_desc_offset); @@ -2468,7 +2464,7 @@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); seq_printf(m, "\tLast submission result: %d\n", client->retcode); - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { seq_printf(m, "\tSubmissions: %llu %s\n", client->submissions[engine->guc_id], engine->name); @@ -2485,7 +2481,6 @@ static int i915_guc_info(struct seq_file *m, void *data) struct intel_guc guc; struct i915_guc_client client = {}; struct intel_engine_cs *engine; - enum intel_engine_id i; u64 total = 0; if (!HAS_GUC_SCHED(dev_priv->dev)) @@ -2508,7 +2503,7 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "GuC last action error code: %d\n", guc.action_err); seq_printf(m, "\nGuC submissions:\n"); - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", engine->name, guc.submissions[engine->guc_id], guc.last_seqno[engine->guc_id]); @@ -3181,7 +3176,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) kunmap_atomic(seqno); } else { seq_puts(m, " Last signal:"); - for_each_engine(engine, dev_priv, id) + 
for_each_engine(engine, dev_priv) for (j = 0; j < num_rings; j++) seq_printf(m, "0x%08x\n", I915_READ(engine->semaphore.mbox.signal[j])); @@ -3189,11 +3184,10 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) } seq_puts(m, "\nSync seqno:\n"); - for_each_engine(engine, dev_priv, id) { - for (j = 0; j < num_rings; j++) { + for_each_engine(engine, dev_priv) { + for (j = 0; j < num_rings; j++) seq_printf(m, " 0x%08x ", engine->semaphore.sync_seqno[j]); - } seq_putc(m, '\n'); } seq_putc(m, '\n'); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 62ee86c..e462683 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2008,10 +2008,12 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) return container_of(guc, struct drm_i915_private, guc); } -/* Iterate over initialised rings */ -#define for_each_engine(ring__, dev_priv__, i__) \ - for ((i__) = 0; (i__) < I915_NUM_ENGINES; (i__)++) \ - for_each_if ((((ring__) = &(dev_priv__)->engine[(i__)]), intel_engine_initialized((ring__)))) +/* Simple iterator over all initialised engines */ +#define for_each_engine(engine__, dev_priv__) \ + for ((engine__) = &(dev_priv__)->engine[0]; \ + (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ + (engine__)++) \ + for_each_if (intel_engine_initialized(engine__)) /* Iterator with engine_id */ #define for_each_engine_id(engine__, dev_priv__, id__) \ @@ -2023,8 +2025,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) /* Iterator over subset of engines selected by mask */ #define for_each_engine_masked(engine__, dev_priv__, mask__) \ - for ((engine__) = &dev_priv->engine[0]; (engine__) < &dev_priv->engine[I915_NUM_ENGINES]; (engine__)++) \ - for_each_if (intel_engine_flag((engine__)) & (mask__) && intel_engine_initialized((engine__))) + for ((engine__) = &dev_priv->engine[0]; \ + (engine__) < &dev_priv->engine[I915_NUM_ENGINES]; \ + (engine__)++) \ + for_each_if (((mask__) & intel_engine_flag(engine__)) && \ + intel_engine_initialized(engine__)) enum hdmi_force_audio { HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8588c83..c7a997a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2466,10 +2466,10 @@ void i915_vma_move_to_active(struct i915_vma *vma, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int ret, i, j; + int ret, j; /* Carefully retire all requests without writing to the rings */ - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { ret = intel_engine_idle(engine); if (ret) return ret; @@ -2477,7 +2477,7 @@ void i915_vma_move_to_active(struct i915_vma *vma, i915_gem_retire_requests(dev); /* Finally reset hw state */ - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { intel_ring_init_seqno(engine, seqno); for (j = 0; j < ARRAY_SIZE(engine->semaphore.sync_seqno); j++) @@ -2884,17 +2884,16 @@ void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int i; /* * Before we free the objects from the requests, we need to inspect * them for finding the guilty party. As the requests only borrow * their reference to the objects, the inspection must be done first. 
*/ - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) i915_gem_reset_engine_status(dev_priv, engine); - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) i915_gem_reset_engine_cleanup(dev_priv, engine); i915_gem_context_reset(dev); @@ -2962,9 +2961,8 @@ void i915_gem_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; bool idle = true; - int i; - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { i915_gem_retire_requests_ring(engine); idle &= list_empty(&engine->request_list); if (i915.enable_execlists) { @@ -3009,24 +3007,20 @@ void i915_gem_reset(struct drm_device *dev) struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), mm.idle_work.work); struct drm_device *dev = dev_priv->dev; - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; - for_each_engine(ring, dev_priv, i) - if (!list_empty(&ring->request_list)) + for_each_engine(engine, dev_priv) + if (!list_empty(&engine->request_list)) return; /* we probably should sync with hangcheck here, using cancel_work_sync. - * Also locking seems to be fubar here, ring->request_list is protected + * Also locking seems to be fubar here, engine->request_list is protected * by dev->struct_mutex. */ intel_mark_idle(dev); if (mutex_trylock(&dev->struct_mutex)) { - struct intel_engine_cs *engine; - int i; - - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) i915_gem_batch_pool_fini(&engine->batch_pool); mutex_unlock(&dev->struct_mutex); @@ -3390,10 +3384,10 @@ int i915_gpu_idle(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int ret, i; + int ret; /* Flush everything onto the inactive list. 
*/ - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { if (!i915.enable_execlists) { struct drm_i915_gem_request *req; @@ -4655,9 +4649,8 @@ void i915_gem_vma_destroy(struct i915_vma *vma) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int i; - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) dev_priv->gt.stop_engine(engine); } @@ -4828,7 +4821,7 @@ int i915_gem_init_engines(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int ret, i, j; + int ret, j; if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; @@ -4874,7 +4867,7 @@ int i915_gem_init_engines(struct drm_device *dev) } /* Need to do basic initialisation of all rings first: */ - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { ret = engine->init_hw(engine); if (ret) goto out; @@ -4899,7 +4892,7 @@ int i915_gem_init_engines(struct drm_device *dev) goto out; /* Now it is safe to go back round and do everything else: */ - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { struct drm_i915_gem_request *req; req = i915_gem_request_alloc(engine, NULL); @@ -4916,7 +4909,8 @@ int i915_gem_init_engines(struct drm_device *dev) ret = i915_ppgtt_init_ring(req); if (ret && ret != -EIO) { - DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret); + DRM_ERROR("PPGTT enable %s failed %d\n", + engine->name, ret); i915_gem_request_cancel(req); i915_gem_cleanup_engines(dev); goto out; @@ -4924,7 +4918,8 @@ int i915_gem_init_engines(struct drm_device *dev) ret = i915_gem_context_enable(req); if (ret && ret != -EIO) { - DRM_ERROR("Context enable ring #%d failed %d\n", i, ret); + DRM_ERROR("Context enable %s failed %d\n", + engine->name, ret); i915_gem_request_cancel(req); i915_gem_cleanup_engines(dev); goto out; @@ -5005,9 +5000,8 @@ int i915_gem_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int i; - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) dev_priv->gt.cleanup_engine(engine); if (i915.enable_execlists) diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 394e525..fe580cb 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -517,7 +517,7 @@ struct intel_context * i915_semaphore_is_enabled(engine->dev) ? hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : 0; - int len, i, ret; + int len, ret; /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB * invalidation prior to MI_SET_CONTEXT. 
On GEN6 we don't set the value @@ -553,7 +553,7 @@ struct intel_context * intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev), i) { + for_each_engine(signaller, to_i915(engine->dev)) { if (signaller == engine) continue; @@ -582,7 +582,7 @@ struct intel_context * intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_engine(signaller, to_i915(engine->dev), i) { + for_each_engine(signaller, to_i915(engine->dev)) { if (signaller == engine) continue; diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index ef9cd70..a565164 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -38,12 +38,11 @@ struct drm_i915_gem_object *obj; struct intel_engine_cs *engine; int err = 0; - int i; if (warned) return 0; - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { list_for_each_entry(obj, &engine->active_list, engine_list[engine->id]) { if (obj->base.dev != dev || diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 0715bb7..59e1821 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1737,9 +1737,8 @@ static void gen8_ppgtt_enable(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int j; - for_each_engine(engine, dev_priv, j) { + for_each_engine(engine, dev_priv) { u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0; I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); @@ -1751,7 +1750,6 @@ static void gen7_ppgtt_enable(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t ecochk, ecobits; - int i; ecobits = I915_READ(GAC_ECO_BITS); I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); @@ -1765,7 +1763,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev) } I915_WRITE(GAM_ECOCHK, ecochk); - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { /* GFX_MODE is per-ring on gen7+ */ I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); @@ -2287,12 +2285,11 @@ void i915_check_and_clear_faults(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int i; if (INTEL_INFO(dev)->gen < 6) return; - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { u32 fault_reg; fault_reg = I915_READ(RING_FAULT_REG(engine)); if (fault_reg & RING_FAULT_VALID) { diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index 0611bdc..da86bdb 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -842,7 +842,7 @@ static void guc_create_ads(struct intel_guc *guc) struct guc_mmio_reg_state *reg_state; struct intel_engine_cs *engine; struct page *page; - u32 size, i; + u32 size; /* The ads obj includes the struct itself and buffers passed to GuC */ size = sizeof(struct guc_ads) + sizeof(struct guc_policies) + @@ -871,7 +871,7 @@ static void guc_create_ads(struct intel_guc *guc) engine = &dev_priv->engine[RCS]; ads->golden_context_lrca = engine->status_page.gfx_addr; - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine); /* GuC scheduling policies */ @@ -884,7 +884,7 @@ static void 
guc_create_ads(struct intel_guc *guc) /* MMIO reg state */ reg_state = (void *)policies + sizeof(struct guc_policies); - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { reg_state->mmio_white_list[engine->guc_id].mmio_start = engine->mmio_base + GUC_MMIO_WHITE_LIST_START; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 14a23b3..5aa4239 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1080,9 +1080,8 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) static bool any_waiters(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; - int i; - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) if (engine->irq_refcount) return true; @@ -2450,7 +2449,6 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, bool reset_completed) { struct intel_engine_cs *engine; - int i; /* * Notify all waiters for GPU completion events that reset state has @@ -2460,7 +2458,7 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, */ /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) wake_up_all(&engine->irq_queue); /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ @@ -2829,10 +2827,9 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) { struct drm_i915_private *dev_priv = engine->dev->dev_private; struct intel_engine_cs *signaller; - int i; if (INTEL_INFO(dev_priv->dev)->gen >= 8) { - for_each_engine(signaller, dev_priv, i) { + for_each_engine(signaller, dev_priv) { if (engine == signaller) continue; @@ -2842,7 +2839,7 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) } else { u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; - for_each_engine(signaller, dev_priv, i) { + for_each_engine(signaller, dev_priv) { if(engine == signaller) continue; @@ -2958,9 +2955,8 @@ static int semaphore_passed(struct intel_engine_cs *engine) static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; - int i; - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) engine->hangcheck.deadlock = 0; } diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index e1aff62..b4976f9 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -82,12 +82,12 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) static void direct_interrupts_to_host(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; - int i, irqs; + int irqs; /* tell all command streamers NOT to forward interrupts and vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) I915_WRITE(RING_MODE_GEN7(engine), irqs); /* route all GT interrupts to the host */ @@ -99,12 +99,12 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv) static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) { struct intel_engine_cs *engine; - int i, irqs; + int irqs; /* tell all command streamers to forward interrupts and vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); - 
for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) I915_WRITE(RING_MODE_GEN7(engine), irqs); /* route USER_INTERRUPT to Host, all others are sent to GuC. */ diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 40ef4ea..5d4ca3b 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -2673,9 +2673,8 @@ void intel_lr_context_reset(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int i; - for_each_engine(engine, dev_priv, i) { + for_each_engine(engine, dev_priv) { struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; struct intel_ringbuffer *ringbuf = diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 521cf45..6a04761 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4817,7 +4817,6 @@ static void gen9_enable_rc6(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; - int unused; /* 1a: Software RC state - RC0 */ I915_WRITE(GEN6_RC_STATE, 0); @@ -4838,7 +4837,7 @@ static void gen9_enable_rc6(struct drm_device *dev) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv, unused) + for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); if (HAS_GUC_UCODE(dev)) @@ -4887,7 +4886,6 @@ static void gen8_enable_rps(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; uint32_t rc6_mask = 0; - int unused; /* 1a: Software RC state - RC0 */ I915_WRITE(GEN6_RC_STATE, 0); @@ -4906,7 +4904,7 @@ static void gen8_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv, unused) + for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); if (IS_BROADWELL(dev)) @@ -4971,7 +4969,7 @@ static void gen6_enable_rps(struct drm_device *dev) u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; u32 gtfifodbg; int rc6_mode; - int i, ret; + int ret; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -5003,7 +5001,7 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -5497,7 +5495,6 @@ static void cherryview_enable_rps(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0, pcbr; - int i; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -5522,7 +5519,7 @@ static void cherryview_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); @@ -5595,7 +5592,6 @@ static void valleyview_enable_rps(struct drm_device *dev) struct drm_i915_private *dev_priv = 
dev->dev_private; struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0; - int i; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -5633,7 +5629,7 @@ static void valleyview_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); @@ -6012,14 +6008,13 @@ bool i915_gpu_busy(void) struct drm_i915_private *dev_priv; struct intel_engine_cs *engine; bool ret = false; - int i; spin_lock_irq(&mchdev_lock); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; - for_each_engine(engine, dev_priv, i) + for_each_engine(engine, dev_priv) ret |= !list_empty(&engine->request_list); out_unlock: -- 1.9.1
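
For anyone who wants to see the new two-parameter iterator's control flow in isolation, here is a minimal stand-alone sketch. The type and field names below (struct fake_dev_priv, struct fake_engine, NUM_ENGINES, initialized) are simplified stand-ins invented purely for illustration, not the real i915 structures; the authoritative definitions are the i915_drv.h macros in the diff above, and the skip-unless guard mirrors the kernel's for_each_if() pattern.

/*
 * Stand-alone sketch of the two-parameter for_each_engine() pattern.
 * Everything here is a simplified stand-in for illustration only; it is
 * not the real driver code. Build with e.g.: cc -std=c99 -Wall sketch.c
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_ENGINES 4

struct fake_engine {
	const char *name;
	bool initialized;
};

struct fake_dev_priv {
	struct fake_engine engine[NUM_ENGINES];
};

/* Same skip-unless trick as the kernel's for_each_if(). */
#define for_each_if(condition) if (!(condition)) {} else

/* Two-parameter form: walks the engine array, no caller-side index. */
#define for_each_engine(engine__, dev_priv__) \
	for ((engine__) = &(dev_priv__)->engine[0]; \
	     (engine__) < &(dev_priv__)->engine[NUM_ENGINES]; \
	     (engine__)++) \
		for_each_if((engine__)->initialized)

int main(void)
{
	struct fake_dev_priv dev_priv = {
		.engine = {
			{ "rcs",  true  },
			{ "bcs",  true  },
			{ "vcs",  false },	/* not initialised: skipped */
			{ "vecs", true  },
		},
	};
	struct fake_engine *engine;

	/* Prints rcs, bcs and vecs; vcs is filtered out by for_each_if(). */
	for_each_engine(engine, &dev_priv)
		printf("%s\n", engine->name);

	return 0;
}

In the real driver the guard is intel_engine_initialized(), so call sites still iterate only over engines that were actually set up, exactly as the old three-parameter macro did, just without the throwaway index variable.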