With GuC-based SLPC, frequency control will move to GuC while the host continues to
control RC6 and ring frequency setup. SLPC can be enabled in the GuC setup path and
runs in GuC in parallel with the rest of the i915 setup, so we can do away with
deferred RPS enabling. This requires separate handling of RPS, RC6 and ring
frequencies in the driver flows. We can still use the *gt_powersave routines with
separate status variables for RPS, RC6 and SLPC. With this patch, RC6 and ring
frequency setup (if applicable) are tracked through rps.rc6_enabled and RPS through
rps.rps_enabled.

The active-RPS check in the suspend flow is needed only on platforms where RC6 and
RPS enabling/disabling are coupled together; runtime PM suspend itself depends only
on RC6. Hence the active-RPS check is done only for platforms before Gen9.

v2: Changed parameter to dev_priv for IS_GEN9 and HAS_RUNTIME_PM, line spacing
changes (David); commit message update for checkpatch issues.
v3: Rebase.
v4: Commit message update.
v5: Updated intel_enable_gt_powersave and intel_disable_gt_powersave with separate
RPS and RC6 handling; rebase and commit message update. (Sagar)
v6: Added comments at the definition of rc6_enabled.
v7: s/rps.enabled/rps.rps_enabled. With the gen9 pre-production RPS disabling
changes removed, update rps_enabled in enable/disable_gt_powersave. Added checks
for rc6_enabled and rps_enabled for gen9+ platforms.

Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_debugfs.c |  4 +-
 drivers/gpu/drm/i915/i915_drv.c     |  9 +++-
 drivers/gpu/drm/i915/i915_drv.h     |  9 +++-
 drivers/gpu/drm/i915/intel_pm.c     | 88 ++++++++++++++++++++++++++-----------
 4 files changed, 80 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 48572b1..b3fb7350 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2252,7 +2252,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	struct drm_device *dev = &dev_priv->drm;
 	struct drm_file *file;
 
-	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
+	seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.rps_enabled);
 	seq_printf(m, "GPU busy? %s [%d requests]\n",
 		   yesno(dev_priv->gt.awake), dev_priv->gt.active_requests);
 	seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
@@ -2288,7 +2288,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
 	mutex_unlock(&dev->filelist_mutex);
 
 	if (INTEL_GEN(dev_priv) >= 6 &&
-	    dev_priv->rps.enabled &&
+	    dev_priv->rps.rps_enabled &&
 	    dev_priv->gt.active_requests) {
 		u32 rpup, rpupei;
 		u32 rpdown, rpdownei;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index b6cc2fe..54ccd13 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2468,9 +2468,16 @@ static int intel_runtime_suspend(struct device *kdev)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;
 
-	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
+	if (WARN_ON_ONCE(!intel_enable_rc6()))
 		return -ENODEV;
 
+	if (INTEL_GEN(dev_priv) >= 9) {
+		if (WARN_ON_ONCE(!dev_priv->rps.rc6_enabled))
+			return -ENODEV;
+	} else if (WARN_ON_ONCE(!dev_priv->rps.rps_enabled)) {
+		return -ENODEV;
+	}
+
 	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
 		return -ENODEV;
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index db684dc..57fe045 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1336,7 +1336,12 @@ struct intel_gen6_power_mgmt {
 	int last_adj;
 	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
 
-	bool enabled;
+	/*
+	 * For platforms prior to Gen9, RPS and RC6 status is tracked through
+	 * "rps_enabled". For Gen9+, RC6 is tracked through "rc6_enabled".
+	 */
+	bool rps_enabled;
+	bool rc6_enabled;
 	struct delayed_work autoenable_work;
 	atomic_t num_waiters;
 	atomic_t boosts;
@@ -2376,7 +2381,7 @@ struct drm_i915_private {
 	/* Cannot be determined by PCIID. You must always read a register. */
 	u32 edram_cap;
 
-	/* gen6+ rps state */
+	/* gen6+ rps/rc6 state */
 	struct intel_gen6_power_mgmt rps;
 
 	/* ilk-only ips/rps state. Everything in here is protected by the global
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index b0ba5a1..a1b32e1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6118,7 +6118,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 void gen6_rps_busy(struct drm_i915_private *dev_priv)
 {
 	mutex_lock(&dev_priv->rps.hw_lock);
-	if (dev_priv->rps.enabled) {
+	if (dev_priv->rps.rps_enabled) {
 		u8 freq;
 
 		if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
@@ -6153,7 +6153,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
 	gen6_disable_rps_interrupts(dev_priv);
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	if (dev_priv->rps.enabled) {
+	if (dev_priv->rps.rps_enabled) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 			vlv_set_rps_idle(dev_priv);
 		else
@@ -6175,7 +6175,7 @@ void gen6_rps_boost(struct drm_i915_gem_request *rq,
 	/* This is intentionally racy! We peek at the state here, then
 	 * validate inside the RPS worker.
 	 */
-	if (!i915->rps.enabled)
+	if (!i915->rps.rps_enabled)
 		return;
 
 	boost = false;
@@ -6203,7 +6203,7 @@ int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
 	GEM_BUG_ON(val > dev_priv->rps.max_freq);
 	GEM_BUG_ON(val < dev_priv->rps.min_freq);
 
-	if (!dev_priv->rps.enabled) {
+	if (!dev_priv->rps.rps_enabled) {
 		dev_priv->rps.cur_freq = val;
 		return 0;
 	}
@@ -6220,6 +6220,8 @@ static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
 {
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 	I915_WRITE(GEN9_PG_ENABLE, 0);
+
+	dev_priv->rps.rc6_enabled = false;
 }
 
 static void gen9_disable_rps(struct drm_i915_private *dev_priv)
@@ -6507,6 +6509,8 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
 			(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
 
+	dev_priv->rps.rc6_enabled = true;
+
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -7808,21 +7812,23 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
 
 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
 {
-	dev_priv->rps.enabled = true; /* force disabling */
+	dev_priv->rps.rps_enabled = true; /* force disabling */
+	dev_priv->rps.rc6_enabled = true;
 	intel_disable_gt_powersave(dev_priv);
 
 	gen6_reset_rps_interrupts(dev_priv);
 }
 
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
+/*
+ * This function disables RPS and RC6 for platforms prior to GEN9 and
+ * disables only RPS for GEN9+.
+ */
+void __intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-	if (!READ_ONCE(dev_priv->rps.enabled))
+	if (!READ_ONCE(dev_priv->rps.rps_enabled))
 		return;
 
-	mutex_lock(&dev_priv->rps.hw_lock);
-
 	if (INTEL_GEN(dev_priv) >= 9) {
-		gen9_disable_rc6(dev_priv);
 		gen9_disable_rps(dev_priv);
 	} else if (IS_CHERRYVIEW(dev_priv)) {
 		cherryview_disable_rps(dev_priv);
@@ -7834,33 +7840,40 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
 		ironlake_disable_drps(dev_priv);
 	}
 
-	dev_priv->rps.enabled = false;
+	dev_priv->rps.rps_enabled = false;
+}
+
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	if (INTEL_GEN(dev_priv) >= 9) {
+		if (READ_ONCE(dev_priv->rps.rc6_enabled))
+			gen9_disable_rc6(dev_priv);
+	}
+	__intel_disable_gt_powersave(dev_priv);
+
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+/*
+ * This function enables RPS and RC6 for platforms prior to GEN9 and
+ * enables only RPS for GEN9+.
+ */
+void __intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 {
 	/* We shouldn't be disabling as we submit, so this should be less
 	 * racy than it appears!
 	 */
-	if (READ_ONCE(dev_priv->rps.enabled))
+	if (READ_ONCE(dev_priv->rps.rps_enabled))
 		return;
 
-	/* Powersaving is controlled by the host when inside a VM */
-	if (intel_vgpu_active(dev_priv))
-		return;
-
-	mutex_lock(&dev_priv->rps.hw_lock);
-
 	if (IS_CHERRYVIEW(dev_priv)) {
 		cherryview_enable_rps(dev_priv);
 	} else if (IS_VALLEYVIEW(dev_priv)) {
 		valleyview_enable_rps(dev_priv);
 	} else if (INTEL_GEN(dev_priv) >= 9) {
-		gen9_enable_rc6(dev_priv);
 		gen9_enable_rps(dev_priv);
-		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
-			gen6_update_ring_freq(dev_priv);
 	} else if (IS_BROADWELL(dev_priv)) {
 		gen8_enable_rps(dev_priv);
 		gen6_update_ring_freq(dev_priv);
@@ -7878,10 +7891,35 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
 	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);
 
-	dev_priv->rps.enabled = true;
+	dev_priv->rps.rps_enabled = true;
+}
+
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
+{
+	/* Powersaving is controlled by the host when inside a VM */
+	if (intel_vgpu_active(dev_priv))
+		return;
+
+	mutex_lock(&dev_priv->rps.hw_lock);
+
+	if (INTEL_GEN(dev_priv) >= 9) {
+		if (!READ_ONCE(dev_priv->rps.rc6_enabled))
+			gen9_enable_rc6(dev_priv);
+		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
+			gen6_update_ring_freq(dev_priv);
+	}
+	__intel_enable_gt_powersave(dev_priv);
+
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
+#define GT_POWERSAVE_ENABLED(dev_priv) \
+	(((INTEL_GEN(dev_priv) >= 9) && \
+	  (READ_ONCE(dev_priv->rps.rps_enabled) && \
+	   READ_ONCE(dev_priv->rps.rc6_enabled))) || \
+	 ((INTEL_GEN(dev_priv) < 9) && \
+	  READ_ONCE(dev_priv->rps.rps_enabled)))
+
 static void __intel_autoenable_gt_powersave(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
@@ -7889,7 +7927,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
 	struct intel_engine_cs *rcs;
 	struct drm_i915_gem_request *req;
 
-	if (READ_ONCE(dev_priv->rps.enabled))
+	if (GT_POWERSAVE_ENABLED(dev_priv))
 		goto out;
 
 	rcs = dev_priv->engine[RCS];
@@ -7919,7 +7957,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
 
 void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
 {
-	if (READ_ONCE(dev_priv->rps.enabled))
+	if (GT_POWERSAVE_ENABLED(dev_priv))
 		return;
 
 	if (IS_IRONLAKE_M(dev_priv)) {
-- 
1.9.1
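For readers outside the driver tree, a minimal standalone model of the state-tracking
rule this patch introduces may help. This is not i915 code: struct gt_power_state,
gt_powersave_enabled() and runtime_suspend_allowed() below are illustrative stand-ins
for intel_gen6_power_mgmt, the GT_POWERSAVE_ENABLED() macro and the gate added in
intel_runtime_suspend().

#include <stdbool.h>
#include <stdio.h>

/*
 * Pre-Gen9: RPS and RC6 are coupled and tracked via rps_enabled.
 * Gen9+: RC6 is tracked separately via rc6_enabled.
 */
struct gt_power_state {
	int gen;           /* stand-in for INTEL_GEN() */
	bool rps_enabled;  /* RPS (and, pre-Gen9, RC6) status */
	bool rc6_enabled;  /* RC6 status, used on Gen9+ */
};

/* True when everything the patch calls "GT powersave" is up. */
static bool gt_powersave_enabled(const struct gt_power_state *s)
{
	if (s->gen >= 9)
		return s->rps_enabled && s->rc6_enabled;
	return s->rps_enabled;
}

/*
 * Runtime PM depends only on RC6, so on Gen9+ only rc6_enabled is checked;
 * pre-Gen9 the coupled rps_enabled flag stands in for both.
 */
static bool runtime_suspend_allowed(const struct gt_power_state *s)
{
	return s->gen >= 9 ? s->rc6_enabled : s->rps_enabled;
}

int main(void)
{
	struct gt_power_state skl = { .gen = 9, .rps_enabled = true, .rc6_enabled = false };
	struct gt_power_state hsw = { .gen = 7, .rps_enabled = true, .rc6_enabled = false };

	printf("gen9: powersave=%d rpm_ok=%d\n",
	       gt_powersave_enabled(&skl), runtime_suspend_allowed(&skl));
	printf("gen7: powersave=%d rpm_ok=%d\n",
	       gt_powersave_enabled(&hsw), runtime_suspend_allowed(&hsw));
	return 0;
}

The gen9 case prints powersave=0 rpm_ok=0 because RC6 is off, mirroring the new
WARN_ON_ONCE(!dev_priv->rps.rc6_enabled) check; the gen7 case relies only on the
coupled rps_enabled flag.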