Currently, tests modify i915.enable_psr and then do a modeset cycle to
change the PSR state. Instead, we can write a value to
i915_edp_psr_status to force a certain mode without a modeset. To retain
compatibility with older userspace, we also still allow the override
through the module parameter, and add some tracking to check whether a
debugfs mode is specified.

Changes since v1:
- Rename dev_priv->psr.enabled to .dp, and .hw_configured to .enabled.
- Fix i915_psr_debugfs_mode to match the writes to debugfs.
- Rename __i915_edp_psr_write to intel_psr_set_debugfs_mode, simplify it
  and move it to intel_psr.c. This keeps all internals in intel_psr.c.
- Perform an interruptible wait for hw completion outside of the psr
  lock, instead of being forced to trywait and return -EBUSY.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxxxx>
Cc: Rodrigo Vivi <rodrigo.vivi@xxxxxxxxx>
---
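
Not part of the patch, just an illustration for reviewers and test
writers: with this in place PSR can be flipped at runtime through the
debugfs file instead of a module reload plus modeset cycle. The write
accepts the values of the new enum i915_psr_debugfs_mode: -1 (default,
follow i915.enable_psr), 0 (disabled), 1 (enabled), 2 (enabled, force
link standby), 3 (enabled, no link standby); boolean strings are parsed
as a fallback. A minimal userspace sketch, assuming debugfs is mounted
at /sys/kernel/debug and the i915 device is DRM minor 0:

/* Illustration only, not part of the patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/dri/0/i915_edp_psr_status";
	char buf[4096];
	ssize_t len;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* "1" forces PSR on without a modeset; "0" would force it off,
	 * "-1" restores the i915.enable_psr behaviour. */
	if (write(fd, "1", 1) < 0)
		perror("write");

	/* The same file still reports the PSR status on read. */
	len = read(fd, buf, sizeof(buf) - 1);
	if (len > 0) {
		buf[len] = '\0';
		fputs(buf, stdout);
	}

	close(fd);
	return 0;
}
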
 drivers/gpu/drm/i915/i915_debugfs.c |  73 +++++++++-
 drivers/gpu/drm/i915/i915_drv.h     |  11 +-
 drivers/gpu/drm/i915/intel_drv.h    |   3 +
 drivers/gpu/drm/i915/intel_psr.c    | 262 +++++++++++++++++++++++++++---------
 4 files changed, 281 insertions(+), 68 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 574fcf234007..98e169636f86 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2546,16 +2546,13 @@ static const char *psr2_live_status(u32 val)
 
 static int i915_edp_psr_status(struct seq_file *m, void *data)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct drm_i915_private *dev_priv = m->private;
 	u32 psrperf = 0;
 	u32 stat[3];
 	enum pipe pipe;
 	bool enabled = false;
 	bool sink_support;
 
-	if (!HAS_PSR(dev_priv))
-		return -ENODEV;
-
 	sink_support = dev_priv->psr.sink_support;
 	seq_printf(m, "Sink_Support: %s\n", yesno(sink_support));
 	if (!sink_support)
@@ -2564,7 +2561,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	intel_runtime_pm_get(dev_priv);
 
 	mutex_lock(&dev_priv->psr.lock);
-	seq_printf(m, "Enabled: %s\n", yesno((bool)dev_priv->psr.enabled));
+	seq_printf(m, "Enabled: %s\n", yesno(dev_priv->psr.enabled));
 	seq_printf(m, "Busy frontbuffer bits: 0x%03x\n",
 		   dev_priv->psr.busy_frontbuffer_bits);
 	seq_printf(m, "Re-enable work scheduled: %s\n",
@@ -2631,6 +2628,70 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	return 0;
 }
 
+static ssize_t i915_edp_psr_write(struct file *file, const char __user *ubuf,
+				  size_t len, loff_t *offp)
+{
+	struct seq_file *m = file->private_data;
+	struct drm_i915_private *dev_priv = m->private;
+	struct drm_modeset_acquire_ctx ctx;
+	int ret, val;
+
+	if (!dev_priv->psr.sink_support)
+		return -ENODEV;
+
+	ret = kstrtoint_from_user(ubuf, len, 10, &val);
+	if (ret < 0) {
+		bool enable;
+		ret = kstrtobool_from_user(ubuf, len, &enable);
+
+		if (ret < 0)
+			return ret;
+
+		val = enable;
+	}
+
+	if (val < -1 || val > 3)
+		return -EINVAL;
+
+	intel_runtime_pm_get(dev_priv);
+
+	drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+retry:
+	ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
+	if (ret == -EBUSY) {
+		ret = drm_modeset_backoff(&ctx);
+		if (!ret)
+			goto retry;
+	}
+
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+
+	intel_runtime_pm_put(dev_priv);
+
+	return ret ?: len;
+}
+
+static int i915_edp_psr_open(struct inode *inode, struct file *file)
+{
+	struct drm_i915_private *dev_priv = inode->i_private;
+
+	if (!HAS_PSR(dev_priv))
+		return -ENODEV;
+
+	return single_open(file, i915_edp_psr_status, dev_priv);
+}
+
+static const struct file_operations i915_edp_psr_ops = {
+	.owner = THIS_MODULE,
+	.open = i915_edp_psr_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.write = i915_edp_psr_write
+};
+
 static int i915_sink_crc(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4734,7 +4795,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_swizzle_info", i915_swizzle_info, 0},
 	{"i915_ppgtt_info", i915_ppgtt_info, 0},
 	{"i915_llc", i915_llc, 0},
-	{"i915_edp_psr_status", i915_edp_psr_status, 0},
 	{"i915_sink_crc_eDP1", i915_sink_crc, 0},
 	{"i915_energy_uJ", i915_energy_uJ, 0},
 	{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
@@ -4761,6 +4821,7 @@ static const struct i915_debugfs_files {
 	{"i915_wedged", &i915_wedged_fops},
 	{"i915_max_freq", &i915_max_freq_fops},
 	{"i915_min_freq", &i915_min_freq_fops},
+	{"i915_edp_psr_status", &i915_edp_psr_ops},
 	{"i915_cache_sharing", &i915_cache_sharing_fops},
 	{"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
 	{"i915_ring_test_irq", &i915_ring_test_irq_fops},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b39c5f68efb2..9262cfb8aac2 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -596,7 +596,8 @@ struct i915_drrs {
 struct i915_psr {
 	struct mutex lock;
 	bool sink_support;
-	struct intel_dp *enabled;
+	bool enabled;
+	struct intel_dp *dp;
 	bool active;
 	struct delayed_work work;
 	unsigned busy_frontbuffer_bits;
@@ -608,6 +609,14 @@ struct i915_psr {
 	bool alpm;
 	bool has_hw_tracking;
 
+	enum i915_psr_debugfs_mode {
+		PSR_DEBUGFS_MODE_DEFAULT = -1,
+		PSR_DEBUGFS_MODE_DISABLED,
+		PSR_DEBUGFS_MODE_ENABLED,
+		PSR_DEBUGFS_MODE_ENABLED_FORCE_LINK_STANDBY,
+		PSR_DEBUGFS_MODE_ENABLED_NO_LINK_STANDBY
+	} debugfs_mode;
+
 	void (*enable_source)(struct intel_dp *,
 			      const struct intel_crtc_state *);
 	void (*disable_source)(struct intel_dp *,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 1f0e8f1e4594..af3c5578d2ea 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1876,6 +1876,9 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *crtc_state);
 void intel_psr_disable(struct intel_dp *intel_dp,
 		       const struct intel_crtc_state *old_crtc_state);
+int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
+			       struct drm_modeset_acquire_ctx *ctx,
+			       enum i915_psr_debugfs_mode mode);
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits,
 			  enum fb_op_origin origin);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 317cb4a12693..3dc0eda8efe0 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -56,6 +56,52 @@
 #include "intel_drv.h"
 #include "i915_drv.h"
 
+static bool intel_psr_default_link_standby(struct drm_i915_private *dev_priv)
+{
+	/* Override link_standby x link_off defaults */
+	if (i915_modparams.enable_psr == 2) {
+		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
+		return true;
+	}
+
+	if (i915_modparams.enable_psr == 3) {
+		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
+		return false;
+	}
+
+	/* Set link_standby x link_off defaults */
+	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+		/* HSW and BDW require workarounds that we don't implement. */
+		return false;
+	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+		/* On VLV and CHV only standby mode is supported. */
+		return true;
+	else
+		/* For new platforms let's respect VBT back again */
+		return dev_priv->vbt.psr.full_link;
+}
+
+static bool get_link_standby_for_mode(struct drm_i915_private *dev_priv,
+				      enum i915_psr_debugfs_mode mode)
+{
+	if (mode == PSR_DEBUGFS_MODE_ENABLED_FORCE_LINK_STANDBY)
+		return true;
+	else if (mode == PSR_DEBUGFS_MODE_ENABLED_NO_LINK_STANDBY)
+		return false;
+	else
+		return intel_psr_default_link_standby(dev_priv);
+}
+
+static bool psr_global_enabled(enum i915_psr_debugfs_mode mode)
+{
+	if (mode == PSR_DEBUGFS_MODE_DEFAULT)
+		return i915_modparams.enable_psr;
+	else if (mode == PSR_DEBUGFS_MODE_DISABLED)
+		return false;
+	else
+		return true;
+}
+
 static inline enum intel_display_power_domain
 psr_aux_domain(struct intel_dp *intel_dp)
 {
@@ -502,11 +548,6 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 	if (!CAN_PSR(dev_priv))
 		return;
 
-	if (!i915_modparams.enable_psr) {
-		DRM_DEBUG_KMS("PSR disable by flag\n");
-		return;
-	}
-
 	/*
 	 * HSW spec explicitly says PSR is tied to port A.
 	 * BDW+ platforms with DDI implementation of PSR have different
@@ -559,7 +600,11 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 
 	crtc_state->has_psr = true;
 	crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
-	DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
+
+	if (psr_global_enabled(dev_priv->psr.debugfs_mode))
+		DRM_DEBUG_KMS("Enabling PSR%s\n", crtc_state->has_psr2 ? "2" : "");
+	else
+		DRM_DEBUG_KMS("PSR disable by flag\n");
 }
 
 static void intel_psr_activate(struct intel_dp *intel_dp)
@@ -617,6 +662,38 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp,
 	}
 }
 
+static void __intel_psr_enable(struct drm_i915_private *dev_priv,
+			       const struct intel_crtc_state *crtc_state)
+{
+	struct intel_dp *intel_dp = dev_priv->psr.dp;
+
+	if (dev_priv->psr.enabled)
+		return;
+
+	dev_priv->psr.enabled = true;
+
+	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
+	dev_priv->psr.enable_sink(intel_dp);
+	dev_priv->psr.enable_source(intel_dp, crtc_state);
+
+	if (INTEL_GEN(dev_priv) >= 9) {
+		intel_psr_activate(intel_dp);
+	} else {
+		/*
+		 * FIXME: Activation should happen immediately since this
+		 * function is just called after pipe is fully trained and
+		 * enabled.
+		 * However on some platforms we face issues when first
+		 * activation follows a modeset so quickly.
+		 * - On VLV/CHV we get bank screen on first activation
+		 * - On HSW/BDW we get a recoverable frozen screen until
+		 *   next exit-activate sequence.
+		 */
+		schedule_delayed_work(&dev_priv->psr.work,
+			msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
+	}
+}
+
 /**
  * intel_psr_enable - Enable PSR
  * @intel_dp: Intel DP
@@ -639,7 +716,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	WARN_ON(dev_priv->drrs.dp);
 
 	mutex_lock(&dev_priv->psr.lock);
-	if (dev_priv->psr.enabled) {
+	if (dev_priv->psr.dp) {
 		DRM_DEBUG_KMS("PSR already in use\n");
 		goto unlock;
 	}
@@ -647,27 +724,10 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 	dev_priv->psr.psr2_support = crtc_state->has_psr2;
 	dev_priv->psr.busy_frontbuffer_bits = 0;
 
-	dev_priv->psr.setup_vsc(intel_dp, crtc_state);
-	dev_priv->psr.enable_sink(intel_dp);
-	dev_priv->psr.enable_source(intel_dp, crtc_state);
-	dev_priv->psr.enabled = intel_dp;
+	dev_priv->psr.dp = intel_dp;
 
-	if (INTEL_GEN(dev_priv) >= 9) {
-		intel_psr_activate(intel_dp);
-	} else {
-		/*
-		 * FIXME: Activation should happen immediately since this
-		 * function is just called after pipe is fully trained and
-		 * enabled.
-		 * However on some platforms we face issues when first
-		 * activation follows a modeset so quickly.
-		 * - On VLV/CHV we get bank screen on first activation
-		 * - On HSW/BDW we get a recoverable frozen screen until
-		 *   next exit-activate sequence.
-		 */
-		schedule_delayed_work(&dev_priv->psr.work,
-			msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
-	}
+	if (psr_global_enabled(dev_priv->psr.debugfs_mode))
+		__intel_psr_enable(dev_priv, crtc_state);
 
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
@@ -752,6 +812,21 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 	psr_aux_io_power_put(intel_dp);
 }
 
+static void __intel_psr_disable(struct drm_i915_private *dev_priv,
+				const struct intel_crtc_state *old_crtc_state)
+{
+	struct intel_dp *intel_dp = dev_priv->psr.dp;
+
+	if (!dev_priv->psr.enabled)
+		return;
+
+	dev_priv->psr.disable_source(intel_dp, old_crtc_state);
+
+	/* Disable PSR on Sink */
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+	dev_priv->psr.enabled = false;
+}
+
 /**
  * intel_psr_disable - Disable PSR
  * @intel_dp: Intel DP
@@ -773,27 +848,110 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
+	if (intel_dp != dev_priv->psr.dp) {
 		mutex_unlock(&dev_priv->psr.lock);
 		return;
 	}
 
-	dev_priv->psr.disable_source(intel_dp, old_crtc_state);
-
-	/* Disable PSR on Sink */
-	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
+	__intel_psr_disable(dev_priv, old_crtc_state);
 
-	dev_priv->psr.enabled = NULL;
+	dev_priv->psr.dp = NULL;
 	mutex_unlock(&dev_priv->psr.lock);
 
 	cancel_delayed_work_sync(&dev_priv->psr.work);
 }
 
+static struct drm_crtc *
+find_idle_crtc_for_encoder(struct drm_encoder *encoder,
+			   struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_connector_list_iter conn_iter;
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+	struct drm_crtc *crtc;
+	bool found = false;
+	int ret;
+
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter)
+		if (connector->state->best_encoder == encoder) {
+			found = true;
+			break;
+		}
+	drm_connector_list_iter_end(&conn_iter);
+
+	if (WARN_ON(!found))
+		return ERR_PTR(-EINVAL);
+
+	crtc = connector->state->crtc;
+	ret = drm_modeset_lock(&crtc->mutex, ctx);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (connector->state->commit)
+		ret = wait_for_completion_interruptible(&connector->state->commit->hw_done);
+	if (!ret && crtc->state->commit)
+		ret = wait_for_completion_interruptible(&crtc->state->commit->hw_done);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return crtc;
+}
+
+int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
+			       struct drm_modeset_acquire_ctx *ctx,
+			       enum i915_psr_debugfs_mode mode)
+{
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+	int ret;
+	bool enable, link_standby;
+
+	ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
+	if (ret)
+		return ret;
+
+	link_standby = get_link_standby_for_mode(dev_priv, mode);
+	enable = psr_global_enabled(mode);
+
+	mutex_lock(&dev_priv->psr.lock);
+
+	do {
+		if (!dev_priv->psr.dp) {
+			dev_priv->psr.debugfs_mode = mode;
+			dev_priv->psr.link_standby = link_standby;
+			goto end;
+		}
+		encoder = &dp_to_dig_port(dev_priv->psr.dp)->base.base;
+		mutex_unlock(&dev_priv->psr.lock);
+
+		crtc = find_idle_crtc_for_encoder(encoder, ctx);
+		if (IS_ERR(crtc))
+			return PTR_ERR(crtc);
+
+		mutex_lock(&dev_priv->psr.lock);
+	} while (dev_priv->psr.dp != enc_to_intel_dp(encoder));
+
+	if (!enable || dev_priv->psr.link_standby != link_standby)
+		__intel_psr_disable(dev_priv, to_intel_crtc_state(crtc->state));
+
+	dev_priv->psr.debugfs_mode = mode;
+	dev_priv->psr.link_standby = link_standby;
+
+	if (enable)
+		__intel_psr_enable(dev_priv, to_intel_crtc_state(crtc->state));
+
+end:
+	mutex_unlock(&dev_priv->psr.lock);
+	return ret;
+}
+
 static void intel_psr_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), psr.work.work);
-	struct intel_dp *intel_dp = dev_priv->psr.enabled;
+	struct intel_dp *intel_dp = dev_priv->psr.dp;
 	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 
@@ -833,11 +991,11 @@ static void intel_psr_work(struct work_struct *work)
 		}
 	}
 	mutex_lock(&dev_priv->psr.lock);
-	intel_dp = dev_priv->psr.enabled;
-
-	if (!intel_dp)
+	if (!dev_priv->psr.enabled)
 		goto unlock;
 
+	intel_dp = dev_priv->psr.dp;
+
 	/*
 	 * The delayed work can race with an invalidate hence we need to
 	 * recheck. Since psr_flush first clears this and then reschedules we
@@ -853,7 +1011,7 @@ static void intel_psr_work(struct work_struct *work)
 
 static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-	struct intel_dp *intel_dp = dev_priv->psr.enabled;
+	struct intel_dp *intel_dp = dev_priv->psr.dp;
 	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
 	u32 val;
@@ -938,7 +1096,7 @@ void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
 	if (frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)) {
@@ -984,7 +1142,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -1027,7 +1185,7 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
 		return;
 	}
 
-	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	crtc = dp_to_dig_port(dev_priv->psr.dp)->base.base.crtc;
 	pipe = to_intel_crtc(crtc)->pipe;
 
 	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -1081,26 +1239,8 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	if (i915_modparams.enable_psr == -1)
 		i915_modparams.enable_psr = 0;
 
-	/* Set link_standby x link_off defaults */
-	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-		/* HSW and BDW require workarounds that we don't implement. */
-		dev_priv->psr.link_standby = false;
-	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-		/* On VLV and CHV only standby mode is supported. */
-		dev_priv->psr.link_standby = true;
-	else
-		/* For new platforms let's respect VBT back again */
-		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
-
-	/* Override link_standby x link_off defaults */
-	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
-		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
-		dev_priv->psr.link_standby = true;
-	}
-	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
-		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
-		dev_priv->psr.link_standby = false;
-	}
+	dev_priv->psr.link_standby = intel_psr_default_link_standby(dev_priv);
+	dev_priv->psr.debugfs_mode = PSR_DEBUGFS_MODE_DEFAULT;
 
 	INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
 	mutex_init(&dev_priv->psr.lock);
-- 
2.16.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx