Place the RPS counters inside the RPS struct.

Moving the manual workaround EI counters into dev_priv->rps groups them
with the rest of the gen6+ RPS state and lets the struct and member
names drop their redundant decoration (intel_rps_ei_calc becomes
intel_rps_ei, cz_ts_ei becomes cz_clock, and so on).

Signed-off-by: Chris Wilson <chris@xxxxxxxxxxxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_drv.h | 18 +++++++-----------
 drivers/gpu/drm/i915/i915_irq.c | 32 ++++++++++++++++----------------
 2 files changed, 23 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fed405d1a7eb..daee71ef201d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -907,10 +907,10 @@ struct vlv_s0ix_state {
 	u32 clock_gate_dis2;
 };
 
-struct intel_rps_ei_calc {
-	u32 cz_ts_ei;
-	u32 render_ei_c0;
-	u32 media_ei_c0;
+struct intel_rps_ei {
+	u32 cz_clock;
+	u32 render_c0;
+	u32 media_c0;
 };
 
 struct intel_gen6_power_mgmt {
@@ -946,6 +946,9 @@ struct intel_gen6_power_mgmt {
 	struct delayed_work delayed_resume_work;
 	struct work_struct boost_work;
 
+	/* manual wa residency calculations */
+	struct intel_rps_ei up_ei, down_ei;
+
 	/*
 	 * Protects RPS/RC6 register access and PCU communication.
 	 * Must be taken after struct_mutex if nested.
@@ -1548,13 +1551,6 @@ struct drm_i915_private {
 	/* gen6+ rps state */
 	struct intel_gen6_power_mgmt rps;
 
-	/* rps wa up ei calculation */
-	struct intel_rps_ei_calc rps_up_ei;
-
-	/* rps wa down ei calculation */
-	struct intel_rps_ei_calc rps_down_ei;
-
-
 	/* ilk-only ips/rps state. Everything in here is protected by the global
 	 * mchdev_lock in intel_pm.c */
 	struct intel_ilk_power_mgmt ips;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a2d50980b827..8e19d031c05d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1283,7 +1283,7 @@ static void notify_ring(struct drm_device *dev,
 }
 
 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
-			    struct intel_rps_ei_calc *rps_ei)
+			    struct intel_rps_ei *rps_ei)
 {
 	u32 cz_ts, cz_freq_khz;
 	u32 render_count, media_count;
@@ -1296,22 +1296,22 @@ static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
 	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
 	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
 
-	if (rps_ei->cz_ts_ei == 0) {
-		rps_ei->cz_ts_ei = cz_ts;
-		rps_ei->render_ei_c0 = render_count;
-		rps_ei->media_ei_c0 = media_count;
+	if (rps_ei->cz_clock == 0) {
+		rps_ei->cz_clock = cz_ts;
+		rps_ei->render_c0 = render_count;
+		rps_ei->media_c0 = media_count;
 
 		return dev_priv->rps.cur_freq;
 	}
 
-	elapsed_time = cz_ts - rps_ei->cz_ts_ei;
-	rps_ei->cz_ts_ei = cz_ts;
+	elapsed_time = cz_ts - rps_ei->cz_clock;
+	rps_ei->cz_clock = cz_ts;
 
-	elapsed_render = render_count - rps_ei->render_ei_c0;
-	rps_ei->render_ei_c0 = render_count;
+	elapsed_render = render_count - rps_ei->render_c0;
+	rps_ei->render_c0 = render_count;
 
-	elapsed_media = media_count - rps_ei->media_ei_c0;
-	rps_ei->media_ei_c0 = media_count;
+	elapsed_media = media_count - rps_ei->media_c0;
+	rps_ei->media_c0 = media_count;
 
 	/* Convert all the counters into common unit of milli sec */
 	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
@@ -1347,9 +1347,9 @@ static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
 
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-	if (dev_priv->rps_up_ei.cz_ts_ei == 0) {
-		vlv_c0_residency(dev_priv, &dev_priv->rps_up_ei);
-		vlv_c0_residency(dev_priv, &dev_priv->rps_down_ei);
+	if (dev_priv->rps.up_ei.cz_clock == 0) {
+		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
+		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
 		return dev_priv->rps.cur_freq;
 	}
 
@@ -1364,10 +1364,10 @@ static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
 		dev_priv->rps.ei_interrupt_count = 0;
 
 		residency_C0_down = vlv_c0_residency(dev_priv,
-					&dev_priv->rps_down_ei);
+					&dev_priv->rps.down_ei);
 	} else {
 		residency_C0_up = vlv_c0_residency(dev_priv,
-					&dev_priv->rps_up_ei);
+					&dev_priv->rps.up_ei);
 	}
 
 	new_delay = dev_priv->rps.cur_freq;
-- 
2.0.1
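
A minimal sketch of the resulting layout, with the member names taken
from the diff above and all unrelated fields omitted:

	struct intel_rps_ei {
		u32 cz_clock;
		u32 render_c0;
		u32 media_c0;
	};

	struct intel_gen6_power_mgmt {
		/* ... other gen6+ rps state ... */

		/* manual wa residency calculations */
		struct intel_rps_ei up_ei, down_ei;

		/* ... */
	};

so the workaround counters are now reached as dev_priv->rps.up_ei.cz_clock
(previously dev_priv->rps_up_ei.cz_ts_ei), and likewise for down_ei.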