From: Jeff McGee <jeff.mcgee@xxxxxxxxx>

RPS manual mode disables/ignores load-based inputs and allows render
performance state to be controlled externally. The enabling of manual
mode and setting of desired frequency is done through debugfs.

i915_rps_manual:
	'0' - RPS controlled normally using load metrics.
	'1' - RPS controlled manually via i915_cur_freq writes.

i915_cur_freq:
	u64 - Value is the current gpu frequency request in MHz.
	      Writes accepted only if i915_rps_manual = 1.

Supports Gen6+ except Valleyview and Broadwell.

Signed-off-by: Jeff McGee <jeff.mcgee@xxxxxxxxx>
---
 drivers/gpu/drm/i915/i915_debugfs.c | 111 ++++++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_drv.h     |   2 +
 drivers/gpu/drm/i915/i915_irq.c     |   6 ++
 drivers/gpu/drm/i915/intel_pm.c     |  42 ++++++++++++--
 4 files changed, 156 insertions(+), 5 deletions(-)
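For reference, a minimal userspace sketch (illustrative only, not part of the
patch) of how the two debugfs files described above might be driven to pin the
GPU frequency. It assumes debugfs is mounted at /sys/kernel/debug and that the
i915 device is DRM minor 0; the requested value is in MHz and must lie within
the limits reported by i915_min_freq and i915_max_freq.

/*
 * rps_pin.c - pin the GPU to a fixed frequency via the new debugfs files.
 * Assumptions: debugfs mounted at /sys/kernel/debug, i915 is DRM minor 0.
 * Build: cc -o rps_pin rps_pin.c
 */
#include <stdio.h>
#include <stdlib.h>

#define I915_DEBUGFS "/sys/kernel/debug/dri/0/"

static int debugfs_write(const char *name, const char *value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), I915_DEBUGFS "%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(value, f);
	return fclose(f);
}

int main(int argc, char **argv)
{
	const char *mhz = argc > 1 ? argv[1] : "500";	/* request in MHz */

	/* Switch RPS to manual mode so the frequency write persists. */
	if (debugfs_write("i915_rps_manual", "1"))
		return EXIT_FAILURE;

	/* Request a fixed frequency; must be within the min/max limits. */
	if (debugfs_write("i915_cur_freq", mhz))
		return EXIT_FAILURE;

	printf("Requested %s MHz with RPS in manual mode\n", mhz);
	return EXIT_SUCCESS;
}

Writing '0' back to i915_rps_manual returns RPS to normal load-based control.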
"manual" : "normal"); + + ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); + if (ret) + return ret; + + gen6_set_rps_mode(dev, val); + + mutex_unlock(&dev_priv->rps.hw_lock); + + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_rps_manual_fops, + i915_rps_manual_get, i915_rps_manual_set, + "%llu\n"); + static int i915_cache_sharing_get(void *data, u64 *val) { @@ -3496,6 +3605,8 @@ static const struct i915_debugfs_files { {"i915_wedged", &i915_wedged_fops}, {"i915_max_freq", &i915_max_freq_fops}, {"i915_min_freq", &i915_min_freq_fops}, + {"i915_cur_freq", &i915_cur_freq_fops}, + {"i915_rps_manual", &i915_rps_manual_fops}, {"i915_cache_sharing", &i915_cache_sharing_fops}, {"i915_ring_stop", &i915_ring_stop_fops}, {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fa37dfd..73fd646 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -968,6 +968,7 @@ struct intel_gen6_power_mgmt { int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; + bool manual_mode; bool enabled; struct delayed_work delayed_resume_work; @@ -2536,6 +2537,7 @@ extern bool intel_fbc_enabled(struct drm_device *dev); extern void intel_disable_fbc(struct drm_device *dev); extern bool ironlake_set_drps(struct drm_device *dev, u8 val); extern void intel_init_pch_refclk(struct drm_device *dev); +extern void gen6_set_rps_mode(struct drm_device *dev, bool manual); extern void gen6_set_rps(struct drm_device *dev, u8 val); extern void valleyview_set_rps(struct drm_device *dev, u8 val); extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index b226ae6..7b04949 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1045,6 +1045,12 @@ static void gen6_pm_rps_work(struct work_struct *work) mutex_lock(&dev_priv->rps.hw_lock); + /* May have just entered manual mode. */ + if (dev_priv->rps.manual_mode) { + mutex_unlock(&dev_priv->rps.hw_lock); + return; + } + adj = dev_priv->rps.last_adj; if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (adj > 0) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3c79b63..cfdf5f0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3014,7 +3014,8 @@ void gen6_set_rps(struct drm_device *dev, u8 val) if (val == dev_priv->rps.cur_delay) return; - gen6_set_rps_thresholds(dev_priv, val); + if (!dev_priv->rps.manual_mode) + gen6_set_rps_thresholds(dev_priv, val); if (IS_HASWELL(dev)) I915_WRITE(GEN6_RPNSWREQ, @@ -3038,12 +3039,44 @@ void gen6_set_rps(struct drm_device *dev, u8 val) trace_intel_gpu_freq_change(val * 50); } +void gen6_set_rps_mode(struct drm_device *dev, bool manual) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u8 delay; + + if ((INTEL_INFO(dev)->gen < 6) || + IS_VALLEYVIEW(dev) || + IS_BROADWELL(dev)) { + DRM_DEBUG_DRIVER("RPS mode change not supported\n"); + return; + } + + WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); + + dev_priv->rps.manual_mode = manual; + + /* Manual mode disables/ignores load-based inputs and allows render + * performance state to be controlled externally. 
+	/* Manual mode disables/ignores load-based inputs and allows render
+	 * performance state to be controlled externally. */
+	if (manual) {
+		I915_WRITE(GEN6_RP_CONTROL,
+			   GEN6_RP_MEDIA_HW_NORMAL_MODE);
+		delay = (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8;
+	} else {
+		/* Force a reset */
+		dev_priv->rps.power = HIGH_POWER;
+		dev_priv->rps.cur_delay = 0;
+		delay = dev_priv->rps.min_delay;
+	}
+
+	gen6_set_rps(dev, delay);
+}
+
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	if (dev_priv->rps.enabled) {
+	if (dev_priv->rps.enabled && !dev_priv->rps.manual_mode) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
 		else
@@ -3058,7 +3091,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = dev_priv->dev;
 
 	mutex_lock(&dev_priv->rps.hw_lock);
-	if (dev_priv->rps.enabled) {
+	if (dev_priv->rps.enabled && !dev_priv->rps.manual_mode) {
 		if (IS_VALLEYVIEW(dev))
 			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_delay);
 		else
@@ -3366,8 +3399,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 			DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
 	}
 
-	dev_priv->rps.power = HIGH_POWER; /* force a reset */
-	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_delay);
+	gen6_set_rps_mode(dev, dev_priv->rps.manual_mode);
 
 	gen6_enable_rps_interrupts(dev);
 
-- 
1.8.5.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/intel-gfx