On Mon, Apr 06, 2020 at 06:11:55PM -0700, José Roberto de Souza wrote:
> TC ports can enter in TCCOLD to save power and is required to request
> to PCODE to exit this state before use or read to TC registers.
>
> For TGL there is a new MBOX command to do that with a parameter to ask
> PCODE to exit and block TCCOLD entry or unblock TCCOLD entry.
>
> So adding a new power domain to reuse the refcount and only allow
> TC cold when all TC ports are not in use.
>
> v2:
> - fixed missing case in intel_display_power_domain_str()
> - moved tgl_tc_cold_request to intel_display_power.c
> - renamed TGL_TC_COLD_OFF to TGL_TC_COLD_OFF_POWER_DOMAINS
> - added all TC and TBT aux power domains to
>   TGL_TC_COLD_OFF_POWER_DOMAINS
>
> BSpec: 49294
> Cc: Imre Deak <imre.deak@xxxxxxxxx>
> Cc: Cooper Chiou <cooper.chiou@xxxxxxxxx>
> Cc: Kai-Heng Feng <kai.heng.feng@xxxxxxxxxxxxx>
> Signed-off-by: José Roberto de Souza <jose.souza@xxxxxxxxx>
> ---
>  .../drm/i915/display/intel_display_power.c   | 98 +++++++++++++++++++
>  .../drm/i915/display/intel_display_power.h   |  1 +
>  drivers/gpu/drm/i915/display/intel_tc.c      | 17 +++-
>  drivers/gpu/drm/i915/i915_reg.h              |  3 +
>  4 files changed, 116 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
> index 0383801a9acc..5d33929f3724 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_power.c
> +++ b/drivers/gpu/drm/i915/display/intel_display_power.c
> @@ -151,6 +151,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
>  		return "GT_IRQ";
>  	case POWER_DOMAIN_DPLL_DC_OFF:
>  		return "DPLL_DC_OFF";
> +	case POWER_DOMAIN_TC_COLD_OFF:
> +		return "TC_COLD_OFF";
>  	default:
>  		MISSING_CASE(domain);
>  		return "?";
> @@ -2858,6 +2860,21 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
>  #define TGL_AUX_I_TBT6_IO_POWER_DOMAINS (	\
>  	BIT_ULL(POWER_DOMAIN_AUX_I_TBT))
>
> +#define TGL_TC_COLD_OFF_POWER_DOMAINS (		\
> +	BIT_ULL(POWER_DOMAIN_AUX_D)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_E)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_F)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_G)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_H)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_I)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_D_TBT)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_E_TBT)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_F_TBT)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_G_TBT)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_H_TBT)	|	\
> +	BIT_ULL(POWER_DOMAIN_AUX_I_TBT)	|	\
> +	BIT_ULL(POWER_DOMAIN_TC_COLD_OFF))
> +
>  static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
>  	.sync_hw = i9xx_power_well_sync_hw_noop,
>  	.enable = i9xx_always_on_power_well_noop,
> @@ -3960,6 +3977,81 @@ static const struct i915_power_well_desc ehl_power_wells[] = {
>  	},
>  };
>
> +static void
> +tgl_tc_cold_request(struct drm_i915_private *i915, bool block)
> +{
> +	u32 low_val, high_val;

Can be moved to their scope.
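
Something like this is what I mean, just a sketch reusing the names from
the patch above (not compile tested):

	do {
		u32 low_val = 0;
		u32 high_val = block ? 0 :
			TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
		...
	} while (tries < 3);

so the two locals are declared in the scope where they are actually used.
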
> +	u8 tries = 0;
> +	int ret;
> +
> +	do {
> +		low_val = 0;
> +		high_val = block ? 0 : TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ;
> +
> +		/*
> +		 * Spec states that we should timeout the request after 200us
> +		 * but the function below will timeout after 500us
> +		 */
> +		ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val,
> +					     &high_val);
> +		if (ret == 0) {
> +			if (block &&
> +			    (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
> +				ret = -EIO;
> +			else
> +				break;
> +		}
> +
> +		if (ret != -EAGAIN)
> +			tries++;

-EAGAIN means that the PCODE run/busy flag didn't get cleared in the
previous iteration, and BSpec says to bail out and not use the port in
that case. But we can't really do that, so let's give some slack to PUNIT
(and the CPU): msleep(1) here, so the next iteration doesn't return
immediately, while still protecting against an endless loop?
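
That is, something along these lines (only a sketch of one way to do it,
not tested; it counts every attempt so a stuck -EAGAIN can't spin forever,
and msleep() needs linux/delay.h if that isn't already pulled in):

		/* Give PUNIT (and the CPU) a breather before retrying. */
		if (ret == -EAGAIN)
			msleep(1);

		tries++;
	} while (tries < 3);
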
> +	} while (tries < 3);
> +
> +	drm_dbg_kms(&i915->drm, "TC cold %sblock %s\n", block ? "" : "un",
> +		    ret == 0 ? "succeeded" : "failed");

Isn't the fail always a true error? (also on ICL)

> +}
> +
> +static void
> +tgl_tc_cold_off_power_well_enable(struct drm_i915_private *i915,
> +				  struct i915_power_well *power_well)
> +{
> +	tgl_tc_cold_request(i915, true);
> +}
> +
> +static void
> +tgl_tc_cold_off_power_well_disable(struct drm_i915_private *i915,
> +				   struct i915_power_well *power_well)
> +{
> +	tgl_tc_cold_request(i915, false);
> +}
> +
> +static void
> +tgl_tc_cold_off_power_well_sync_hw(struct drm_i915_private *i915,
> +				   struct i915_power_well *power_well)
> +{
> +	if (power_well->count > 0)
> +		tgl_tc_cold_off_power_well_enable(i915, power_well);
> +	else
> +		tgl_tc_cold_off_power_well_disable(i915, power_well);
> +}
> +
> +static bool
> +tgl_tc_cold_off_power_well_is_enabled(struct drm_i915_private *dev_priv,
> +				      struct i915_power_well *power_well)
> +{
> +	/*
> +	 * Not the correctly implementation but there is no way to just read it
> +	 * from PCODE, so returning count to avoid state mismatch errors
> +	 */
> +	return power_well->count;
> +}
> +
> +static const struct i915_power_well_ops tgl_tc_cold_off_ops = {
> +	.sync_hw = tgl_tc_cold_off_power_well_sync_hw,
> +	.enable = tgl_tc_cold_off_power_well_enable,
> +	.disable = tgl_tc_cold_off_power_well_disable,
> +	.is_enabled = tgl_tc_cold_off_power_well_is_enabled,
> +};
> +
>  static const struct i915_power_well_desc tgl_power_wells[] = {
>  	{
>  		.name = "always-on",
> @@ -4287,6 +4379,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
>  			.hsw.irq_pipe_mask = BIT(PIPE_D),
>  		},
>  	},
> +	{
> +		.name = "TC cold off",
> +		.domains = TGL_TC_COLD_OFF_POWER_DOMAINS,
> +		.ops = &tgl_tc_cold_off_ops,
> +		.id = DISP_PW_ID_NONE,
> +	},
>  };
>
>  static int
> diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
> index da64a5edae7a..070457e7b948 100644
> --- a/drivers/gpu/drm/i915/display/intel_display_power.h
> +++ b/drivers/gpu/drm/i915/display/intel_display_power.h
> @@ -76,6 +76,7 @@ enum intel_display_power_domain {
>  	POWER_DOMAIN_MODESET,
>  	POWER_DOMAIN_GT_IRQ,
>  	POWER_DOMAIN_DPLL_DC_OFF,
> +	POWER_DOMAIN_TC_COLD_OFF,
>  	POWER_DOMAIN_INIT,
>
>  	POWER_DOMAIN_NUM,
> diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
> index 7564259d677e..83861653768d 100644
> --- a/drivers/gpu/drm/i915/display/intel_tc.c
> +++ b/drivers/gpu/drm/i915/display/intel_tc.c
> @@ -53,16 +53,27 @@ tc_port_load_fia_params(struct drm_i915_private *i915,
>  	}
>  }
>
> +static enum intel_display_power_domain
> +tc_cold_get_power_domain(struct intel_digital_port *dig_port)
> +{
> +	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
> +
> +	if (INTEL_GEN(i915) == 11)
> +		return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
> +	else
> +		return POWER_DOMAIN_TC_COLD_OFF;
> +}
> +
>  static intel_wakeref_t
>  tc_cold_block(struct intel_digital_port *dig_port)
>  {
>  	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
>  	enum intel_display_power_domain domain;
>
> -	if (INTEL_GEN(i915) != 11 || !dig_port->tc_legacy_port)
> +	if (INTEL_GEN(i915) == 11 && !dig_port->tc_legacy_port)
>  		return 0;
>
> -	domain = intel_legacy_aux_to_power_domain(dig_port->aux_ch);
> +	domain = tc_cold_get_power_domain(dig_port);
>  	return intel_display_power_get(i915, domain);
>  }
>
> @@ -80,7 +91,7 @@ tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
>  	if (wakeref == 0)
>  		return;
>
> -	domain = intel_legacy_aux_to_power_domain(dig_port->aux_ch);
> +	domain = tc_cold_get_power_domain(dig_port);
>  	intel_display_power_put_async(i915, domain, wakeref);
>  }
>
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index 5cbcd01ac3d5..e04eec003d4b 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -9110,6 +9110,9 @@ enum {
>  #define ICL_PCODE_EXIT_TCCOLD			0x12
>  #define HSW_PCODE_DE_WRITE_FREQ_REQ		0x17
>  #define DISPLAY_IPS_CONTROL			0x19
> +#define TGL_PCODE_TCCOLD			0x26
> +#define   TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED	REG_BIT(0)
> +#define   TGL_PCODE_EXIT_TCCOLD_DATA_H_UNBLOCK_REQ	REG_BIT(0)
>  /* See also IPS_CTL */
>  #define IPS_PCODE_CONTROL			(1 << 30)
>  #define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL	0x1A
> --
> 2.26.0