[PATCH v13 1/2] drm/i915/display: Support PSR Multiple Instances

This is preliminary work for supporting multiple eDP PSR and DP Panel
Replay instances. It refactors the singleton PSR implementation into one
that can support PSR on multiple transcoders, moving and renaming
drm_i915_private's i915_psr structure to an intel_psr structure embedded
in intel_dp. The PSR interrupt handling routine also changes to support
multiple transcoders, but the scenario and timing of enabling and
disabling PSR stay the same. Driving multiple pipes with a single
transcoder PSR instance is not supported yet.
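
For reference, a minimal sketch (illustrative only, not part of the diff
below) of the access pattern this refactor introduces: PSR state now
lives in each intel_dp, so code that used to take the single
dev_priv->psr.lock instead walks the DP encoders and locks each
instance, mirroring the new shape of intel_psr_invalidate() and
intel_psr_flush():

  struct intel_encoder *encoder;

  for_each_intel_dp(&dev_priv->drm, encoder) {
          struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

          /* sink support is now tracked per intel_dp, not per device */
          if (!CAN_PSR(intel_dp))
                  continue;

          mutex_lock(&intel_dp->psr.lock);
          /* operate on this encoder's own PSR instance */
          mutex_unlock(&intel_dp->psr.lock);
  }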

v2: Fix indentation and add comments
v3: Remove Blank line
v4: Rebased
v5: Rebased and addressed Anshuman's review comment.
    - Move the call to intel_psr_init() into intel_dp_init_connector()
v6: Address Anshuman's review comments
   - Remove wrong comments and add a comment about the single-pipe PSR
     limitation
v7: Update intel_psr_compute_config() to support multiple-transcoder
    PSR on BDW+
v8: Address Anshuman's review comments
   - Replace DRM_DEBUG_KMS with drm_dbg_kms() / DRM_WARN with drm_warn()
v9: Fix commit message
v10: Rebased
v11: Address Jose's review comments.
  - Reorder the call to intel_psr2_program_trans_man_trk_ctl().
  - Keep the old name for drm_i915_private in order to reduce changes.
  - Change the restrictions on multiple PSR instances.
v12: Address Jose's review comments.
  - Move the call to intel_psr2_program_trans_man_trk_ctl() into
    commit_pipe_config().
  - Restore the original checking order of CAN_PSR() and connector_status
    in i915_psr_sink_status_show().
  - Drop the unneeded intel_dp_update_pipe() function.
  - Add an encoder check so that intel_psr_wait_for_idle() waits on the
    specific encoder belonging to the crtc_state.
  - Add missing whitespace to comments.
v13: Rebased and addressed Jose's review comments.
  - Add and use the for_each_intel_encoder_is_psr_enabled() macro
    (see the usage sketch after this changelog).
  - Fix intel_psr_invalidate() and intel_psr_flush() to use the correct
    frontbuffer bits for each pipe.
  - Remove redundant or unneeded code.
  - Update comments.
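
A simplified usage sketch of the new iterator (illustrative only; it
mirrors intel_psr_wait_for_idle() in the diff below):

  struct intel_encoder *encoder;

  for_each_intel_encoder_is_psr_enabled(&dev_priv->drm, encoder,
                                        crtc_state->uapi.encoder_mask) {
          struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
          u32 psr_status;

          /* only encoders in the CRTC's mask with PSR enabled get here */
          if (!intel_dp->psr.psr2_enabled &&
              psr_wait_for_idle(intel_dp, &psr_status))
                  drm_err(&dev_priv->drm, "PSR idle timed out 0x%x\n",
                          psr_status);
  }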

Signed-off-by: Gwan-gyeong Mun <gwan-gyeong.mun@xxxxxxxxx>
Cc: José Roberto de Souza <jose.souza@xxxxxxxxx>
Cc: Juha-Pekka Heikkila <juhapekka.heikkila@xxxxxxxxx>
Cc: Anshuman Gupta <anshuman.gupta@xxxxxxxxx>
Reviewed-by: Anshuman Gupta <anshuman.gupta@xxxxxxxxx>
---
 drivers/gpu/drm/i915/display/intel_display.c  |   2 -
 drivers/gpu/drm/i915/display/intel_display.h  |   8 +
 .../drm/i915/display/intel_display_debugfs.c  | 105 +++-
 .../drm/i915/display/intel_display_types.h    |  46 ++
 drivers/gpu/drm/i915/display/intel_dp.c       |  10 +-
 drivers/gpu/drm/i915/display/intel_psr.c      | 576 ++++++++++--------
 drivers/gpu/drm/i915/display/intel_psr.h      |  11 +-
 drivers/gpu/drm/i915/display/intel_sprite.c   |   6 +-
 drivers/gpu/drm/i915/i915_drv.h               |  38 --
 drivers/gpu/drm/i915/i915_irq.c               |  48 +-
 10 files changed, 485 insertions(+), 365 deletions(-)

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 667d941878a3..24df9dfb57fb 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -14135,8 +14135,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
 		intel_dvo_init(dev_priv);
 	}
 
-	intel_psr_init(dev_priv);
-
 	for_each_intel_encoder(&dev_priv->drm, encoder) {
 		encoder->base.possible_crtcs =
 			intel_encoder_possible_crtcs(encoder);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 64ffa34544a7..fc41d0d9e5a5 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -417,6 +417,14 @@ enum phy_fia {
 		for_each_if((encoder_mask) &				\
 			    drm_encoder_mask(&intel_encoder->base))
 
+#define for_each_intel_encoder_is_psr_enabled(dev, intel_encoder, encoder_mask) \
+	list_for_each_entry(intel_encoder,				\
+			    &(dev)->mode_config.encoder_list,		\
+			    base.head)					\
+		for_each_if(((encoder_mask) &				\
+			     drm_encoder_mask(&intel_encoder->base)) &&	\
+			    intel_encoder_is_psr_enabled(intel_encoder))
+
 #define for_each_intel_dp(dev, intel_encoder)			\
 	for_each_intel_encoder(dev, intel_encoder)		\
 		for_each_if(intel_encoder_is_dp(intel_encoder))
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index d62b18d5ecd8..9868253df61f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -249,12 +249,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
 		"sink internal error",
 	};
 	struct drm_connector *connector = m->private;
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
 	struct intel_dp *intel_dp =
 		intel_attached_dp(to_intel_connector(connector));
 	int ret;
 
-	if (!CAN_PSR(dev_priv)) {
+	if (!CAN_PSR(intel_dp)) {
 		seq_puts(m, "PSR Unsupported\n");
 		return -ENODEV;
 	}
@@ -280,12 +279,13 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
 DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
 
 static void
-psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
 {
 	u32 val, status_val;
 	const char *status = "unknown";
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
-	if (dev_priv->psr.psr2_enabled) {
+	if (intel_dp->psr.psr2_enabled) {
 		static const char * const live_status[] = {
 			"IDLE",
 			"CAPTURE",
@@ -300,7 +300,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
 			"TG_ON"
 		};
 		val = intel_de_read(dev_priv,
-				    EDP_PSR2_STATUS(dev_priv->psr.transcoder));
+				    EDP_PSR2_STATUS(intel_dp->psr.transcoder));
 		status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
 			      EDP_PSR2_STATUS_STATE_SHIFT;
 		if (status_val < ARRAY_SIZE(live_status))
@@ -317,7 +317,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
 			"SRDENT_ON",
 		};
 		val = intel_de_read(dev_priv,
-				    EDP_PSR_STATUS(dev_priv->psr.transcoder));
+				    EDP_PSR_STATUS(intel_dp->psr.transcoder));
 		status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
 			      EDP_PSR_STATUS_STATE_SHIFT;
 		if (status_val < ARRAY_SIZE(live_status))
@@ -327,21 +327,18 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
 	seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
 }
 
-static int i915_edp_psr_status(struct seq_file *m, void *data)
+static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
 {
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct i915_psr *psr = &dev_priv->psr;
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	struct intel_psr *psr = &intel_dp->psr;
 	intel_wakeref_t wakeref;
 	const char *status;
 	bool enabled;
 	u32 val;
 
-	if (!HAS_PSR(dev_priv))
-		return -ENODEV;
-
 	seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
-	if (psr->dp)
-		seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+	if (psr->sink_support)
+		seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
 	seq_puts(m, "\n");
 
 	if (!psr->sink_support)
@@ -365,16 +362,16 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 
 	if (psr->psr2_enabled) {
 		val = intel_de_read(dev_priv,
-				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
+				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
 		enabled = val & EDP_PSR2_ENABLE;
 	} else {
 		val = intel_de_read(dev_priv,
-				    EDP_PSR_CTL(dev_priv->psr.transcoder));
+				    EDP_PSR_CTL(intel_dp->psr.transcoder));
 		enabled = val & EDP_PSR_ENABLE;
 	}
 	seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
 		   enableddisabled(enabled), val);
-	psr_source_status(dev_priv, m);
+	psr_source_status(intel_dp, m);
 	seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
 		   psr->busy_frontbuffer_bits);
 
@@ -383,7 +380,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	 */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
 		val = intel_de_read(dev_priv,
-				    EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+				    EDP_PSR_PERF_CNT(intel_dp->psr.transcoder));
 		val &= EDP_PSR_PERF_CNT_MASK;
 		seq_printf(m, "Performance counter: %u\n", val);
 	}
@@ -404,7 +401,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 		 */
 		for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
 			val = intel_de_read(dev_priv,
-					    PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
+					    PSR2_SU_STATUS(intel_dp->psr.transcoder, frame));
 			su_frames_val[frame / 3] = val;
 		}
 
@@ -430,23 +427,57 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_encoder *encoder;
+	struct intel_dp *intel_dp = NULL;
+
+	if (!HAS_PSR(dev_priv))
+		return -ENODEV;
+
+	/* Find the first EDP */
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		if (encoder->type == INTEL_OUTPUT_EDP) {
+			intel_dp = enc_to_intel_dp(encoder);
+			break;
+		}
+	}
+
+	if (!intel_dp)
+		return -ENODEV;
+
+	return intel_psr_status(m, intel_dp);
+}
+
 static int
 i915_edp_psr_debug_set(void *data, u64 val)
 {
 	struct drm_i915_private *dev_priv = data;
 	intel_wakeref_t wakeref;
-	int ret;
+	int ret = -ENODEV;
+	struct intel_encoder *encoder;
 
-	if (!CAN_PSR(dev_priv))
-		return -ENODEV;
+	if (!HAS_PSR(dev_priv))
+		return ret;
 
-	drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-	wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+		if (!CAN_PSR(intel_dp))
+			continue;
 
-	ret = intel_psr_debug_set(dev_priv, val);
+		if (encoder->type == INTEL_OUTPUT_EDP) {
+			drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
 
-	intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+			wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+			/* TODO: split to each transcoder's PSR debug state */
+			ret = intel_psr_debug_set(intel_dp, val);
+
+			intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+		}
+	}
 
 	return ret;
 }
@@ -455,12 +486,25 @@ static int
 i915_edp_psr_debug_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
+	struct intel_encoder *encoder;
 
-	if (!CAN_PSR(dev_priv))
+	if (!HAS_PSR(dev_priv))
 		return -ENODEV;
 
-	*val = READ_ONCE(dev_priv->psr.debug);
-	return 0;
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		if (!CAN_PSR(intel_dp))
+			continue;
+
+		/* TODO: split to each transcoder's PSR debug state */
+		if (encoder->type == INTEL_OUTPUT_EDP) {
+			*val = READ_ONCE(intel_dp->psr.debug);
+			return 0;
+		}
+	}
+
+	return -ENODEV;
 }
 
 DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
@@ -1233,9 +1277,6 @@ static void drrs_status_per_crtc(struct seq_file *m,
 		/* disable_drrs() will make drrs->dp NULL */
 		if (!drrs->dp) {
 			seq_puts(m, "Idleness DRRS: Disabled\n");
-			if (dev_priv->psr.enabled)
-				seq_puts(m,
-				"\tAs PSR is enabled, DRRS is not enabled\n");
 			mutex_unlock(&drrs->mutex);
 			return;
 		}
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 39397748b4b0..63daf8b45941 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1415,6 +1415,42 @@ struct intel_pps {
 	struct edp_power_seq pps_delays;
 };
 
+struct intel_psr {
+	/* Mutex for PSR state of the transcoder */
+	struct mutex lock;
+
+#define I915_PSR_DEBUG_MODE_MASK	0x0f
+#define I915_PSR_DEBUG_DEFAULT		0x00
+#define I915_PSR_DEBUG_DISABLE		0x01
+#define I915_PSR_DEBUG_ENABLE		0x02
+#define I915_PSR_DEBUG_FORCE_PSR1	0x03
+#define I915_PSR_DEBUG_IRQ		0x10
+
+	u32 debug;
+	bool sink_support;
+	bool enabled;
+	enum pipe pipe;
+	enum transcoder transcoder;
+	bool active;
+	struct work_struct work;
+	unsigned int busy_frontbuffer_bits;
+	bool sink_psr2_support;
+	bool link_standby;
+	bool colorimetry_support;
+	bool psr2_enabled;
+	bool psr2_sel_fetch_enabled;
+	u8 sink_sync_latency;
+	ktime_t last_entry_attempt;
+	ktime_t last_exit;
+	bool sink_not_reliable;
+	bool irq_aux_error;
+	u16 su_x_granularity;
+	bool dc3co_enabled;
+	u32 dc3co_exit_delay;
+	struct delayed_work dc3co_work;
+	struct drm_dp_vsc_sdp vsc;
+};
+
 struct intel_dp {
 	i915_reg_t output_reg;
 	u32 DP;
@@ -1517,6 +1553,8 @@ struct intel_dp {
 	bool hobl_active;
 
 	struct intel_dp_pcon_frl frl;
+
+	struct intel_psr psr;
 };
 
 enum lspcon_vendor {
@@ -1729,6 +1767,14 @@ static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
 	}
 }
 
+static inline bool intel_encoder_is_psr_enabled(struct intel_encoder *encoder)
+{
+	if (!intel_encoder_is_dp(encoder))
+		return false;
+
+	return enc_to_intel_dp(encoder)->psr.enabled;
+}
+
 static inline struct intel_lspcon *
 enc_to_intel_lspcon(struct intel_encoder *encoder)
 {
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 8c12d5375607..d532bd56250f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1663,12 +1663,10 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
 				  const struct drm_connector_state *conn_state,
 				  struct drm_dp_vsc_sdp *vsc)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
 	vsc->sdp_type = DP_SDP_VSC;
 
-	if (dev_priv->psr.psr2_enabled) {
-		if (dev_priv->psr.colorimetry_support &&
+	if (intel_dp->psr.psr2_enabled) {
+		if (intel_dp->psr.colorimetry_support &&
 		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
 			/* [PSR2, +Colorimetry] */
 			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
@@ -2359,7 +2357,7 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
 		return false;
 	}
 
-	if (CAN_PSR(i915) && intel_dp_is_edp(intel_dp)) {
+	if (CAN_PSR(intel_dp) && intel_dp_is_edp(intel_dp)) {
 		drm_dbg_kms(&i915->drm, "Forcing full modeset to compute PSR state\n");
 		crtc_state->uapi.mode_changed = true;
 		return false;
@@ -6641,6 +6639,8 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
 	intel_dp->frl.is_trained = false;
 	intel_dp->frl.trained_rate_gbps = 0;
 
+	intel_psr_init(intel_dp);
+
 	return true;
 
 fail:
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 2c365b778f74..925c0c6d92cd 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -80,9 +80,11 @@
  * use page flips.
  */
 
-static bool psr_global_enabled(struct drm_i915_private *i915)
+static bool psr_global_enabled(struct intel_dp *intel_dp)
 {
-	switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 	case I915_PSR_DEBUG_DEFAULT:
 		return i915->params.enable_psr;
 	case I915_PSR_DEBUG_DISABLE:
@@ -92,9 +94,9 @@ static bool psr_global_enabled(struct drm_i915_private *i915)
 	}
 }
 
-static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
+static bool psr2_global_enabled(struct intel_dp *intel_dp)
 {
-	switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
+	switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
 	case I915_PSR_DEBUG_DISABLE:
 	case I915_PSR_DEBUG_FORCE_PSR1:
 		return false;
@@ -103,11 +105,12 @@ static bool psr2_global_enabled(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void psr_irq_control(struct drm_i915_private *dev_priv)
+static void psr_irq_control(struct intel_dp *intel_dp)
 {
 	enum transcoder trans_shift;
 	u32 mask, val;
 	i915_reg_t imr_reg;
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 	/*
 	 * gen12+ has registers relative to transcoder and one per transcoder
@@ -116,14 +119,14 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
 	 */
 	if (INTEL_GEN(dev_priv) >= 12) {
 		trans_shift = 0;
-		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
 	} else {
-		trans_shift = dev_priv->psr.transcoder;
+		trans_shift = intel_dp->psr.transcoder;
 		imr_reg = EDP_PSR_IMR;
 	}
 
 	mask = EDP_PSR_ERROR(trans_shift);
-	if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ)
+	if (intel_dp->psr.debug & I915_PSR_DEBUG_IRQ)
 		mask |= EDP_PSR_POST_EXIT(trans_shift) |
 			EDP_PSR_PRE_ENTRY(trans_shift);
 
@@ -172,30 +175,31 @@ static void psr_event_print(struct drm_i915_private *i915,
 		drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
 }
 
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
 {
-	enum transcoder cpu_transcoder = dev_priv->psr.transcoder;
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
 	enum transcoder trans_shift;
 	i915_reg_t imr_reg;
 	ktime_t time_ns =  ktime_get();
 
 	if (INTEL_GEN(dev_priv) >= 12) {
 		trans_shift = 0;
-		imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder);
+		imr_reg = TRANS_PSR_IMR(intel_dp->psr.transcoder);
 	} else {
-		trans_shift = dev_priv->psr.transcoder;
+		trans_shift = intel_dp->psr.transcoder;
 		imr_reg = EDP_PSR_IMR;
 	}
 
 	if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
-		dev_priv->psr.last_entry_attempt = time_ns;
+		intel_dp->psr.last_entry_attempt = time_ns;
 		drm_dbg_kms(&dev_priv->drm,
 			    "[transcoder %s] PSR entry attempt in 2 vblanks\n",
 			    transcoder_name(cpu_transcoder));
 	}
 
 	if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
-		dev_priv->psr.last_exit = time_ns;
+		intel_dp->psr.last_exit = time_ns;
 		drm_dbg_kms(&dev_priv->drm,
 			    "[transcoder %s] PSR exit completed\n",
 			    transcoder_name(cpu_transcoder));
@@ -203,7 +207,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 		if (INTEL_GEN(dev_priv) >= 9) {
 			u32 val = intel_de_read(dev_priv,
 						PSR_EVENT(cpu_transcoder));
-			bool psr2_enabled = dev_priv->psr.psr2_enabled;
+			bool psr2_enabled = intel_dp->psr.psr2_enabled;
 
 			intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
 				       val);
@@ -217,7 +221,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 		drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
 			 transcoder_name(cpu_transcoder));
 
-		dev_priv->psr.irq_aux_error = true;
+		intel_dp->psr.irq_aux_error = true;
 
 		/*
 		 * If this interruption is not masked it will keep
@@ -231,7 +235,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
 		val |= EDP_PSR_ERROR(trans_shift);
 		intel_de_write(dev_priv, imr_reg, val);
 
-		schedule_work(&dev_priv->psr.work);
+		schedule_work(&intel_dp->psr.work);
 	}
 }
 
@@ -292,12 +296,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv =
 		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
 
-	if (dev_priv->psr.dp) {
-		drm_warn(&dev_priv->drm,
-			 "More than one eDP panel found, PSR support should be extended\n");
-		return;
-	}
-
 	drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
 			 sizeof(intel_dp->psr_dpcd));
 
@@ -318,12 +316,10 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 		return;
 	}
 
-	dev_priv->psr.sink_support = true;
-	dev_priv->psr.sink_sync_latency =
+	intel_dp->psr.sink_support = true;
+	intel_dp->psr.sink_sync_latency =
 		intel_dp_get_sink_sync_latency(intel_dp);
 
-	dev_priv->psr.dp = intel_dp;
-
 	if (INTEL_GEN(dev_priv) >= 9 &&
 	    (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
 		bool y_req = intel_dp->psr_dpcd[1] &
@@ -341,14 +337,14 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
 		 * Y-coordinate requirement panels we would need to enable
 		 * GTC first.
 		 */
-		dev_priv->psr.sink_psr2_support = y_req && alpm;
+		intel_dp->psr.sink_psr2_support = y_req && alpm;
 		drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
-			    dev_priv->psr.sink_psr2_support ? "" : "not ");
+			    intel_dp->psr.sink_psr2_support ? "" : "not ");
 
-		if (dev_priv->psr.sink_psr2_support) {
-			dev_priv->psr.colorimetry_support =
+		if (intel_dp->psr.sink_psr2_support) {
+			intel_dp->psr.colorimetry_support =
 				intel_dp_get_colorimetry_status(intel_dp);
-			dev_priv->psr.su_x_granularity =
+			intel_dp->psr.su_x_granularity =
 				intel_dp_get_su_x_granulartiy(intel_dp);
 		}
 	}
@@ -374,7 +370,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 	BUILD_BUG_ON(sizeof(aux_msg) > 20);
 	for (i = 0; i < sizeof(aux_msg); i += 4)
 		intel_de_write(dev_priv,
-			       EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
+			       EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
 			       intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
 
 	aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -385,7 +381,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
 
 	/* Select only valid bits for SRD_AUX_CTL */
 	aux_ctl &= psr_aux_mask;
-	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
+	intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
 		       aux_ctl);
 }
 
@@ -395,14 +391,14 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
 	u8 dpcd_val = DP_PSR_ENABLE;
 
 	/* Enable ALPM at sink for psr2 */
-	if (dev_priv->psr.psr2_enabled) {
+	if (intel_dp->psr.psr2_enabled) {
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
 				   DP_ALPM_ENABLE |
 				   DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
 
 		dpcd_val |= DP_PSR_ENABLE_PSR2 | DP_PSR_IRQ_HPD_WITH_CRC_ERRORS;
 	} else {
-		if (dev_priv->psr.link_standby)
+		if (intel_dp->psr.link_standby)
 			dpcd_val |= DP_PSR_MAIN_LINK_ACTIVE;
 
 		if (INTEL_GEN(dev_priv) >= 8)
@@ -465,7 +461,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
 	 * off-by-one issue that HW has in some cases.
 	 */
 	idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-	idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+	idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
 
 	if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
 		idle_frames = 0xf;
@@ -485,7 +481,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
 	if (IS_HASWELL(dev_priv))
 		val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
-	if (dev_priv->psr.link_standby)
+	if (intel_dp->psr.link_standby)
 		val |= EDP_PSR_LINK_STANDBY;
 
 	val |= intel_psr1_get_tp_time(intel_dp);
@@ -493,9 +489,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
 	if (INTEL_GEN(dev_priv) >= 8)
 		val |= EDP_PSR_CRC_ENABLE;
 
-	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+	val |= (intel_de_read(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder)) &
 		EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
-	intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), val);
 }
 
 static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
@@ -530,7 +526,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
 		val |= EDP_Y_COORDINATE_ENABLE;
 
-	val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
+	val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
 	val |= intel_psr2_get_tp_time(intel_dp);
 
 	if (INTEL_GEN(dev_priv) >= 12) {
@@ -549,7 +545,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 		val |= EDP_PSR2_FAST_WAKE(7);
 	}
 
-	if (dev_priv->psr.psr2_sel_fetch_enabled) {
+	if (intel_dp->psr.psr2_sel_fetch_enabled) {
 		/* WA 1408330847 */
 		if (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
 		    IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
@@ -558,20 +554,20 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
 				     DIS_RAM_BYPASS_PSR2_MAN_TRACK);
 
 		intel_de_write(dev_priv,
-			       PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder),
+			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
 			       PSR2_MAN_TRK_CTL_ENABLE);
 	} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
 		intel_de_write(dev_priv,
-			       PSR2_MAN_TRK_CTL(dev_priv->psr.transcoder), 0);
+			       PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
 	}
 
 	/*
 	 * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
 	 * recommending keep this bit unset while PSR2 is enabled.
 	 */
-	intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+	intel_de_write(dev_priv, EDP_PSR_CTL(intel_dp->psr.transcoder), 0);
 
-	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
 }
 
 static bool
@@ -594,55 +590,58 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
 			    drm_mode_vrefresh(&cstate->hw.adjusted_mode));
 }
 
-static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
+static void psr2_program_idle_frames(struct intel_dp *intel_dp,
 				     u32 idle_frames)
 {
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	u32 val;
 
 	idle_frames <<=  EDP_PSR2_IDLE_FRAME_SHIFT;
-	val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
+	val = intel_de_read(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder));
 	val &= ~EDP_PSR2_IDLE_FRAME_MASK;
 	val |= idle_frames;
-	intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+	intel_de_write(dev_priv, EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
 }
 
-static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
 {
-	psr2_program_idle_frames(dev_priv, 0);
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+	psr2_program_idle_frames(intel_dp, 0);
 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
 }
 
-static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
+static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
 {
-	struct intel_dp *intel_dp = dev_priv->psr.dp;
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 	intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
-	psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
+	psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
 }
 
 static void tgl_dc3co_disable_work(struct work_struct *work)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
+	struct intel_dp *intel_dp =
+		container_of(work, typeof(*intel_dp), psr.dc3co_work.work);
 
-	mutex_lock(&dev_priv->psr.lock);
+	mutex_lock(&intel_dp->psr.lock);
 	/* If delayed work is pending, it is not idle */
-	if (delayed_work_pending(&dev_priv->psr.dc3co_work))
+	if (delayed_work_pending(&intel_dp->psr.dc3co_work))
 		goto unlock;
 
-	tgl_psr2_disable_dc3co(dev_priv);
+	tgl_psr2_disable_dc3co(intel_dp);
 unlock:
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&intel_dp->psr.lock);
 }
 
-static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
+static void tgl_disallow_dc3co_on_psr2_exit(struct intel_dp *intel_dp)
 {
-	if (!dev_priv->psr.dc3co_enabled)
+	if (!intel_dp->psr.dc3co_enabled)
 		return;
 
-	cancel_delayed_work(&dev_priv->psr.dc3co_work);
+	cancel_delayed_work(&intel_dp->psr.dc3co_work);
 	/* Before PSR2 exit disallow dc3co*/
-	tgl_psr2_disable_dc3co(dev_priv);
+	tgl_psr2_disable_dc3co(intel_dp);
 }
 
 static void
@@ -715,7 +714,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
 	int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
 
-	if (!dev_priv->psr.sink_psr2_support)
+	if (!intel_dp->psr.sink_psr2_support)
 		return false;
 
 	if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
@@ -725,7 +724,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}
 
-	if (!psr2_global_enabled(dev_priv)) {
+	if (!psr2_global_enabled(intel_dp)) {
 		drm_dbg_kms(&dev_priv->drm, "PSR2 disabled by flag\n");
 		return false;
 	}
@@ -774,10 +773,10 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 	 * only need to validate the SU block width is a multiple of
 	 * x granularity.
 	 */
-	if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
+	if (crtc_hdisplay % intel_dp->psr.su_x_granularity) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
-			    crtc_hdisplay, dev_priv->psr.su_x_granularity);
+			    crtc_hdisplay, intel_dp->psr.su_x_granularity);
 		return false;
 	}
 
@@ -819,13 +818,10 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 	if (crtc_state->vrr.enable)
 		return;
 
-	if (!CAN_PSR(dev_priv))
+	if (!CAN_PSR(intel_dp))
 		return;
 
-	if (intel_dp != dev_priv->psr.dp)
-		return;
-
-	if (!psr_global_enabled(dev_priv)) {
+	if (!psr_global_enabled(intel_dp)) {
 		drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
 		return;
 	}
@@ -833,16 +829,19 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 	/*
 	 * HSW spec explicitly says PSR is tied to port A.
 	 * BDW+ platforms have a instance of PSR registers per transcoder but
-	 * for now it only supports one instance of PSR, so lets keep it
-	 * hardcoded to PORT_A
+	 * BDW, GEN9 and GEN11 have not been validated by the HW team on any
+	 * transcoder other than the eDP one, so for now they only support a
+	 * single PSR instance and it stays hardcoded to PORT_A.
+	 * GEN12, on the other hand, supports an instance of PSR registers
+	 * per transcoder.
 	 */
-	if (dig_port->base.port != PORT_A) {
+	if (INTEL_GEN(dev_priv) < 12 && dig_port->base.port != PORT_A) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "PSR condition failed: Port not supported\n");
 		return;
 	}
 
-	if (dev_priv->psr.sink_not_reliable) {
+	if (intel_dp->psr.sink_not_reliable) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "PSR sink implementation is not reliable\n");
 		return;
@@ -878,23 +877,24 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 static void intel_psr_activate(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+	enum transcoder transcoder = intel_dp->psr.transcoder;
 
-	if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
+	if (transcoder_has_psr2(dev_priv, transcoder))
 		drm_WARN_ON(&dev_priv->drm,
-			    intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+			    intel_de_read(dev_priv, EDP_PSR2_CTL(transcoder)) & EDP_PSR2_ENABLE);
 
 	drm_WARN_ON(&dev_priv->drm,
-		    intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
-	drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
-	lockdep_assert_held(&dev_priv->psr.lock);
+		    intel_de_read(dev_priv, EDP_PSR_CTL(transcoder)) & EDP_PSR_ENABLE);
+	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+	lockdep_assert_held(&intel_dp->psr.lock);
 
 	/* psr1 and psr2 are mutually exclusive.*/
-	if (dev_priv->psr.psr2_enabled)
+	if (intel_dp->psr.psr2_enabled)
 		hsw_activate_psr2(intel_dp);
 	else
 		hsw_activate_psr1(intel_dp);
 
-	dev_priv->psr.active = true;
+	intel_dp->psr.active = true;
 }
 
 static void intel_psr_enable_source(struct intel_dp *intel_dp,
@@ -910,7 +910,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		hsw_psr_setup_aux(intel_dp);
 
-	if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
+	if (intel_dp->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
 					   !IS_GEMINILAKE(dev_priv))) {
 		i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
 		u32 chicken = intel_de_read(dev_priv, reg);
@@ -934,10 +934,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 	if (INTEL_GEN(dev_priv) < 11)
 		mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
 
-	intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
+	intel_de_write(dev_priv, EDP_PSR_DEBUG(intel_dp->psr.transcoder),
 		       mask);
 
-	psr_irq_control(dev_priv);
+	psr_irq_control(intel_dp);
 
 	if (crtc_state->dc3co_exitline) {
 		u32 val;
@@ -955,30 +955,30 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
 
 	if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
-			     dev_priv->psr.psr2_sel_fetch_enabled ?
+			     intel_dp->psr.psr2_sel_fetch_enabled ?
 			     IGNORE_PSR2_HW_TRACKING : 0);
 }
 
-static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
+static void intel_psr_enable_locked(struct intel_dp *intel_dp,
 				    const struct intel_crtc_state *crtc_state,
 				    const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = dev_priv->psr.dp;
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
 	struct intel_encoder *encoder = &dig_port->base;
 	u32 val;
 
-	drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
+	drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
 
-	dev_priv->psr.psr2_enabled = crtc_state->has_psr2;
-	dev_priv->psr.busy_frontbuffer_bits = 0;
-	dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
-	dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
-	dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+	intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+	intel_dp->psr.busy_frontbuffer_bits = 0;
+	intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+	intel_dp->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
+	intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
 	/* DC5/DC6 requires at least 6 idle frames */
 	val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
-	dev_priv->psr.dc3co_exit_delay = val;
-	dev_priv->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
+	intel_dp->psr.dc3co_exit_delay = val;
+	intel_dp->psr.psr2_sel_fetch_enabled = crtc_state->enable_psr2_sel_fetch;
 
 	/*
 	 * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
@@ -990,27 +990,27 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
 	 */
 	if (INTEL_GEN(dev_priv) >= 12) {
 		val = intel_de_read(dev_priv,
-				    TRANS_PSR_IIR(dev_priv->psr.transcoder));
+				    TRANS_PSR_IIR(intel_dp->psr.transcoder));
 		val &= EDP_PSR_ERROR(0);
 	} else {
 		val = intel_de_read(dev_priv, EDP_PSR_IIR);
-		val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
+		val &= EDP_PSR_ERROR(intel_dp->psr.transcoder);
 	}
 	if (val) {
-		dev_priv->psr.sink_not_reliable = true;
+		intel_dp->psr.sink_not_reliable = true;
 		drm_dbg_kms(&dev_priv->drm,
 			    "PSR interruption error set, not enabling PSR\n");
 		return;
 	}
 
 	drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
-		    dev_priv->psr.psr2_enabled ? "2" : "1");
+		    intel_dp->psr.psr2_enabled ? "2" : "1");
 	intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
-				     &dev_priv->psr.vsc);
-	intel_write_dp_vsc_sdp(encoder, crtc_state, &dev_priv->psr.vsc);
+				     &intel_dp->psr.vsc);
+	intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
 	intel_psr_enable_sink(intel_dp);
 	intel_psr_enable_source(intel_dp, crtc_state);
-	dev_priv->psr.enabled = true;
+	intel_dp->psr.enabled = true;
 
 	intel_psr_activate(intel_dp);
 }
@@ -1029,7 +1029,7 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
-	if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
+	if (!CAN_PSR(intel_dp))
 		return;
 
 	if (!crtc_state->has_psr)
@@ -1037,46 +1037,47 @@ void intel_psr_enable(struct intel_dp *intel_dp,
 
 	drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
 
-	mutex_lock(&dev_priv->psr.lock);
-	intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_lock(&intel_dp->psr.lock);
+	intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
+	mutex_unlock(&intel_dp->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_i915_private *dev_priv)
+static void intel_psr_exit(struct intel_dp *intel_dp)
 {
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	u32 val;
 
-	if (!dev_priv->psr.active) {
-		if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
+	if (!intel_dp->psr.active) {
+		if (transcoder_has_psr2(dev_priv, intel_dp->psr.transcoder)) {
 			val = intel_de_read(dev_priv,
-					    EDP_PSR2_CTL(dev_priv->psr.transcoder));
+					    EDP_PSR2_CTL(intel_dp->psr.transcoder));
 			drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
 		}
 
 		val = intel_de_read(dev_priv,
-				    EDP_PSR_CTL(dev_priv->psr.transcoder));
+				    EDP_PSR_CTL(intel_dp->psr.transcoder));
 		drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
 
 		return;
 	}
 
-	if (dev_priv->psr.psr2_enabled) {
-		tgl_disallow_dc3co_on_psr2_exit(dev_priv);
+	if (intel_dp->psr.psr2_enabled) {
+		tgl_disallow_dc3co_on_psr2_exit(intel_dp);
 		val = intel_de_read(dev_priv,
-				    EDP_PSR2_CTL(dev_priv->psr.transcoder));
+				    EDP_PSR2_CTL(intel_dp->psr.transcoder));
 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
 		val &= ~EDP_PSR2_ENABLE;
 		intel_de_write(dev_priv,
-			       EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+			       EDP_PSR2_CTL(intel_dp->psr.transcoder), val);
 	} else {
 		val = intel_de_read(dev_priv,
-				    EDP_PSR_CTL(dev_priv->psr.transcoder));
+				    EDP_PSR_CTL(intel_dp->psr.transcoder));
 		drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
 		val &= ~EDP_PSR_ENABLE;
 		intel_de_write(dev_priv,
-			       EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+			       EDP_PSR_CTL(intel_dp->psr.transcoder), val);
 	}
-	dev_priv->psr.active = false;
+	intel_dp->psr.active = false;
 }
 
 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
@@ -1085,21 +1086,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 	i915_reg_t psr_status;
 	u32 psr_status_mask;
 
-	lockdep_assert_held(&dev_priv->psr.lock);
+	lockdep_assert_held(&intel_dp->psr.lock);
 
-	if (!dev_priv->psr.enabled)
+	if (!intel_dp->psr.enabled)
 		return;
 
 	drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
-		    dev_priv->psr.psr2_enabled ? "2" : "1");
+		    intel_dp->psr.psr2_enabled ? "2" : "1");
 
-	intel_psr_exit(dev_priv);
+	intel_psr_exit(intel_dp);
 
-	if (dev_priv->psr.psr2_enabled) {
-		psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+	if (intel_dp->psr.psr2_enabled) {
+		psr_status = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
 		psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
 	} else {
-		psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+		psr_status = EDP_PSR_STATUS(intel_dp->psr.transcoder);
 		psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
 	}
 
@@ -1109,7 +1110,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 		drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
 
 	/* WA 1408330847 */
-	if (dev_priv->psr.psr2_sel_fetch_enabled &&
+	if (intel_dp->psr.psr2_sel_fetch_enabled &&
 	    (IS_TGL_DISP_STEPPING(dev_priv, STEP_A0, STEP_A0) ||
 	     IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
 		intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
@@ -1118,10 +1119,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 	/* Disable PSR on Sink */
 	drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
 
-	if (dev_priv->psr.psr2_enabled)
+	if (intel_dp->psr.psr2_enabled)
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
 
-	dev_priv->psr.enabled = false;
+	intel_dp->psr.enabled = false;
 }
 
 /**
@@ -1139,20 +1140,22 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 	if (!old_crtc_state->has_psr)
 		return;
 
-	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
+	if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
 		return;
 
-	mutex_lock(&dev_priv->psr.lock);
+	mutex_lock(&intel_dp->psr.lock);
 
 	intel_psr_disable_locked(intel_dp);
 
-	mutex_unlock(&dev_priv->psr.lock);
-	cancel_work_sync(&dev_priv->psr.work);
-	cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
+	mutex_unlock(&intel_dp->psr.lock);
+	cancel_work_sync(&intel_dp->psr.work);
+	cancel_delayed_work_sync(&intel_dp->psr.dc3co_work);
 }
 
-static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
 {
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
 	if (IS_TIGERLAKE(dev_priv))
 		/*
 		 * Writes to CURSURFLIVE in TGL are causing IOMMU errors and
@@ -1166,7 +1169,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
 		 * So using this workaround until this issue is root caused
 		 * and a better fix is found.
 		 */
-		intel_psr_exit(dev_priv);
+		intel_psr_exit(intel_dp);
 	else if (INTEL_GEN(dev_priv) >= 9)
 		/*
 		 * Display WA #0884: skl+
@@ -1177,13 +1180,13 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
 		 * but it makes more sense write to the current active
 		 * pipe.
 		 */
-		intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
+		intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
 	else
 		/*
 		 * A write to CURSURFLIVE do not cause HW tracking to exit PSR
 		 * on older gens so doing the manual exit instead.
 		 */
-		intel_psr_exit(dev_priv);
+		intel_psr_exit(intel_dp);
 }
 
 void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
@@ -1231,16 +1234,19 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
 
 void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct i915_psr *psr = &dev_priv->psr;
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+	struct intel_encoder *encoder;
 
 	if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
 	    !crtc_state->enable_psr2_sel_fetch)
 		return;
 
-	intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(psr->transcoder),
-		       crtc_state->psr2_man_track_ctl);
+	for_each_intel_encoder_is_psr_enabled(&dev_priv->drm, encoder,
+					      crtc_state->uapi.encoder_mask) {
+		intel_de_write(dev_priv,
+			       PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
+			       crtc_state->psr2_man_track_ctl);
+	}
 }
 
 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -1435,13 +1441,13 @@ void intel_psr_update(struct intel_dp *intel_dp,
 		      const struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct i915_psr *psr = &dev_priv->psr;
+	struct intel_psr *psr = &intel_dp->psr;
 	bool enable, psr2_enable;
 
-	if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+	if (!CAN_PSR(intel_dp))
 		return;
 
-	mutex_lock(&dev_priv->psr.lock);
+	mutex_lock(&intel_dp->psr.lock);
 
 	enable = crtc_state->has_psr;
 	psr2_enable = crtc_state->has_psr2;
@@ -1449,15 +1455,15 @@ void intel_psr_update(struct intel_dp *intel_dp,
 	if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
 		/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
 		if (crtc_state->crc_enabled && psr->enabled)
-			psr_force_hw_tracking_exit(dev_priv);
+			psr_force_hw_tracking_exit(intel_dp);
 		else if (INTEL_GEN(dev_priv) < 9 && psr->enabled) {
 			/*
 			 * Activate PSR again after a force exit when enabling
 			 * CRC in older gens
 			 */
-			if (!dev_priv->psr.active &&
-			    !dev_priv->psr.busy_frontbuffer_bits)
-				schedule_work(&dev_priv->psr.work);
+			if (!intel_dp->psr.active &&
+			    !intel_dp->psr.busy_frontbuffer_bits)
+				schedule_work(&intel_dp->psr.work);
 		}
 
 		goto unlock;
@@ -1467,34 +1473,23 @@ void intel_psr_update(struct intel_dp *intel_dp,
 		intel_psr_disable_locked(intel_dp);
 
 	if (enable)
-		intel_psr_enable_locked(dev_priv, crtc_state, conn_state);
+		intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
 
 unlock:
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&intel_dp->psr.lock);
 }
 
 /**
- * intel_psr_wait_for_idle - wait for PSR1 to idle
- * @new_crtc_state: new CRTC state
+ * psr_wait_for_idle - wait for PSR1 to idle
+ * @intel_dp: Intel DP
  * @out_value: PSR status in case of failure
  *
- * This function is expected to be called from pipe_update_start() where it is
- * not expected to race with PSR enable or disable.
- *
  * Returns: 0 on success or -ETIMEOUT if PSR status does not idle.
+ *
  */
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
-			    u32 *out_value)
+static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value)
 {
-	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
-	if (!dev_priv->psr.enabled || !new_crtc_state->has_psr)
-		return 0;
-
-	/* FIXME: Update this for PSR2 if we need to wait for idle */
-	if (READ_ONCE(dev_priv->psr.psr2_enabled))
-		return 0;
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 	/*
 	 * From bspec: Panel Self Refresh (BDW+)
@@ -1502,32 +1497,63 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
 	 * exit training time + 1.5 ms of aux channel handshake. 50 ms is
 	 * defensive enough to cover everything.
 	 */
-
 	return __intel_wait_for_register(&dev_priv->uncore,
-					 EDP_PSR_STATUS(dev_priv->psr.transcoder),
+					 EDP_PSR_STATUS(intel_dp->psr.transcoder),
 					 EDP_PSR_STATUS_STATE_MASK,
 					 EDP_PSR_STATUS_STATE_IDLE, 2, 50,
 					 out_value);
 }
 
-static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
+/**
+ * intel_psr_wait_for_idle - wait for PSR1 to idle
+ * @new_crtc_state: new CRTC state
+ *
+ * This function is expected to be called from pipe_update_start() where it is
+ * not expected to race with PSR enable or disable.
+ */
+void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+	struct intel_encoder *encoder;
+
+	if (!new_crtc_state->has_psr)
+		return;
+
+	for_each_intel_encoder_is_psr_enabled(&dev_priv->drm, encoder,
+					      new_crtc_state->uapi.encoder_mask) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		/* when PSR1 is enabled */
+		if (!intel_dp->psr.psr2_enabled) {
+			u32 psr_status;
+
+			if (psr_wait_for_idle(intel_dp, &psr_status))
+				drm_err(&dev_priv->drm,
+					"PSR idle timed out 0x%x, atomic update may fail\n",
+					psr_status);
+		}
+	}
+}
+
+static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
 {
 	i915_reg_t reg;
 	u32 mask;
 	int err;
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
-	if (!dev_priv->psr.enabled)
+	if (!intel_dp->psr.enabled)
 		return false;
 
-	if (dev_priv->psr.psr2_enabled) {
-		reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder);
+	if (intel_dp->psr.psr2_enabled) {
+		reg = EDP_PSR2_STATUS(intel_dp->psr.transcoder);
 		mask = EDP_PSR2_STATUS_STATE_MASK;
 	} else {
-		reg = EDP_PSR_STATUS(dev_priv->psr.transcoder);
+		reg = EDP_PSR_STATUS(intel_dp->psr.transcoder);
 		mask = EDP_PSR_STATUS_STATE_MASK;
 	}
 
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&intel_dp->psr.lock);
 
 	err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
 	if (err)
@@ -1535,8 +1561,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
 			"Timed out waiting for PSR Idle for re-enable\n");
 
 	/* After the unlocked wait, verify that PSR is still wanted! */
-	mutex_lock(&dev_priv->psr.lock);
-	return err == 0 && dev_priv->psr.enabled;
+	mutex_lock(&intel_dp->psr.lock);
+	return err == 0 && intel_dp->psr.enabled;
 }
 
 static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
@@ -1602,11 +1628,12 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
 	return err;
 }
 
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
 {
 	const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
 	u32 old_mode;
 	int ret;
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 
 	if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
 	    mode > I915_PSR_DEBUG_FORCE_PSR1) {
@@ -1614,21 +1641,21 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
 		return -EINVAL;
 	}
 
-	ret = mutex_lock_interruptible(&dev_priv->psr.lock);
+	ret = mutex_lock_interruptible(&intel_dp->psr.lock);
 	if (ret)
 		return ret;
 
-	old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
-	dev_priv->psr.debug = val;
+	old_mode = intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK;
+	intel_dp->psr.debug = val;
 
 	/*
 	 * Do it right away if it's already enabled, otherwise it will be done
 	 * when enabling the source.
 	 */
-	if (dev_priv->psr.enabled)
-		psr_irq_control(dev_priv);
+	if (intel_dp->psr.enabled)
+		psr_irq_control(intel_dp);
 
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&intel_dp->psr.lock);
 
 	if (old_mode != mode)
 		ret = intel_psr_fastset_force(dev_priv);
@@ -1636,28 +1663,28 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
 	return ret;
 }
 
-static void intel_psr_handle_irq(struct drm_i915_private *dev_priv)
+static void intel_psr_handle_irq(struct intel_dp *intel_dp)
 {
-	struct i915_psr *psr = &dev_priv->psr;
+	struct intel_psr *psr = &intel_dp->psr;
 
-	intel_psr_disable_locked(psr->dp);
+	intel_psr_disable_locked(intel_dp);
 	psr->sink_not_reliable = true;
 	/* let's make sure that sink is awaken */
-	drm_dp_dpcd_writeb(&psr->dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+	drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
 }
 
 static void intel_psr_work(struct work_struct *work)
 {
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), psr.work);
+	struct intel_dp *intel_dp =
+		container_of(work, typeof(*intel_dp), psr.work);
 
-	mutex_lock(&dev_priv->psr.lock);
+	mutex_lock(&intel_dp->psr.lock);
 
-	if (!dev_priv->psr.enabled)
+	if (!intel_dp->psr.enabled)
 		goto unlock;
 
-	if (READ_ONCE(dev_priv->psr.irq_aux_error))
-		intel_psr_handle_irq(dev_priv);
+	if (READ_ONCE(intel_dp->psr.irq_aux_error))
+		intel_psr_handle_irq(intel_dp);
 
 	/*
 	 * We have to make sure PSR is ready for re-enable
@@ -1665,7 +1692,7 @@ static void intel_psr_work(struct work_struct *work)
 	 * PSR might take some time to get fully disabled
 	 * and be ready for re-enable.
 	 */
-	if (!__psr_wait_for_idle_locked(dev_priv))
+	if (!__psr_wait_for_idle_locked(intel_dp))
 		goto unlock;
 
 	/*
@@ -1673,12 +1700,12 @@ static void intel_psr_work(struct work_struct *work)
 	 * recheck. Since psr_flush first clears this and then reschedules we
 	 * won't ever miss a flush when bailing out here.
 	 */
-	if (dev_priv->psr.busy_frontbuffer_bits || dev_priv->psr.active)
+	if (intel_dp->psr.busy_frontbuffer_bits || intel_dp->psr.active)
 		goto unlock;
 
-	intel_psr_activate(dev_priv->psr.dp);
+	intel_psr_activate(intel_dp);
 unlock:
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&intel_dp->psr.lock);
 }
 
 /**
@@ -1697,27 +1724,35 @@ static void intel_psr_work(struct work_struct *work)
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-	if (!CAN_PSR(dev_priv))
-		return;
+	struct intel_encoder *encoder;
+	struct intel_dp *intel_dp;
 
-	if (origin == ORIGIN_FLIP)
-		return;
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		unsigned pipe_frontbuffer_bits = frontbuffer_bits;
 
-	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
-		mutex_unlock(&dev_priv->psr.lock);
-		return;
-	}
+		intel_dp = enc_to_intel_dp(encoder);
+		if (!CAN_PSR(intel_dp))
+			continue;
+
+		if (origin == ORIGIN_FLIP)
+			continue;
+
+		mutex_lock(&intel_dp->psr.lock);
+		if (!intel_dp->psr.enabled) {
+			mutex_unlock(&intel_dp->psr.lock);
+			continue;
+		}
 
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
-	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+		pipe_frontbuffer_bits &=
+			INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+		intel_dp->psr.busy_frontbuffer_bits |= pipe_frontbuffer_bits;
 
-	if (frontbuffer_bits)
-		intel_psr_exit(dev_priv);
+		if (pipe_frontbuffer_bits)
+			intel_psr_exit(intel_dp);
 
-	mutex_unlock(&dev_priv->psr.lock);
+		mutex_unlock(&intel_dp->psr.lock);
+	}
 }
-
 /*
  * When we will be completely rely on PSR2 S/W tracking in future,
  * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
@@ -1725,15 +1760,15 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
  * accordingly in future.
  */
 static void
-tgl_dc3co_flush(struct drm_i915_private *dev_priv,
-		unsigned int frontbuffer_bits, enum fb_op_origin origin)
+tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
+		enum fb_op_origin origin)
 {
-	mutex_lock(&dev_priv->psr.lock);
+	mutex_lock(&intel_dp->psr.lock);
 
-	if (!dev_priv->psr.dc3co_enabled)
+	if (!intel_dp->psr.dc3co_enabled)
 		goto unlock;
 
-	if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active)
+	if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
 		goto unlock;
 
 	/*
@@ -1741,15 +1776,15 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
 	 * when delayed work schedules that means display has been idle.
 	 */
 	if (!(frontbuffer_bits &
-	    INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe)))
+	    INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
 		goto unlock;
 
-	tgl_psr2_enable_dc3co(dev_priv);
-	mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
-			 dev_priv->psr.dc3co_exit_delay);
+	tgl_psr2_enable_dc3co(intel_dp);
+	mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
+			 intel_dp->psr.dc3co_exit_delay);
 
 unlock:
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_unlock(&intel_dp->psr.lock);
 }
 
 /**
@@ -1768,45 +1803,57 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
 void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-	if (!CAN_PSR(dev_priv))
-		return;
+	struct intel_encoder *encoder;
+	struct intel_dp *intel_dp;
 
-	if (origin == ORIGIN_FLIP) {
-		tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin);
-		return;
-	}
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		intel_dp = enc_to_intel_dp(encoder);
 
-	mutex_lock(&dev_priv->psr.lock);
-	if (!dev_priv->psr.enabled) {
-		mutex_unlock(&dev_priv->psr.lock);
-		return;
-	}
+		if (CAN_PSR(intel_dp)) {
+			unsigned pipe_frontbuffer_bits = frontbuffer_bits;
 
-	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe);
-	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+			if (origin == ORIGIN_FLIP) {
+				tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
+				continue;
+			}
+
+			mutex_lock(&intel_dp->psr.lock);
+			if (!intel_dp->psr.enabled) {
+				mutex_unlock(&intel_dp->psr.lock);
+				continue;
+			}
 
-	/* By definition flush = invalidate + flush */
-	if (frontbuffer_bits)
-		psr_force_hw_tracking_exit(dev_priv);
+			pipe_frontbuffer_bits &=
+				INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe);
+			intel_dp->psr.busy_frontbuffer_bits &= ~pipe_frontbuffer_bits;
 
-	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
-		schedule_work(&dev_priv->psr.work);
-	mutex_unlock(&dev_priv->psr.lock);
+			/* By definition flush = invalidate + flush */
+			if (pipe_frontbuffer_bits)
+				psr_force_hw_tracking_exit(intel_dp);
+
+			if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
+				schedule_work(&intel_dp->psr.work);
+			mutex_unlock(&intel_dp->psr.lock);
+		}
+	}
 }
 
 /**
  * intel_psr_init - Init basic PSR work and mutex.
- * @dev_priv: i915 device private
+ * @intel_dp: Intel DP
  *
- * This function is  called only once at driver load to initialize basic
- * PSR stuff.
+ * This function is called after initializing the connector (connector
+ * initialization handles the connector capabilities) and it initializes
+ * basic PSR stuff for each DP encoder.
  */
-void intel_psr_init(struct drm_i915_private *dev_priv)
+void intel_psr_init(struct intel_dp *intel_dp)
 {
+	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
 	if (!HAS_PSR(dev_priv))
 		return;
 
-	if (!dev_priv->psr.sink_support)
+	if (!intel_dp->psr.sink_support)
 		return;
 
 	if (IS_HASWELL(dev_priv))
@@ -1824,14 +1871,14 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	/* Set link_standby x link_off defaults */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
 		/* HSW and BDW require workarounds that we don't implement. */
-		dev_priv->psr.link_standby = false;
+		intel_dp->psr.link_standby = false;
 	else if (INTEL_GEN(dev_priv) < 12)
 		/* For new platforms up to TGL let's respect VBT back again */
-		dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+		intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
 
-	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
-	INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
-	mutex_init(&dev_priv->psr.lock);
+	INIT_WORK(&intel_dp->psr.work, intel_psr_work);
+	INIT_DELAYED_WORK(&intel_dp->psr.dc3co_work, tgl_dc3co_disable_work);
+	mutex_init(&intel_dp->psr.lock);
 }
 
 static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
@@ -1857,7 +1904,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	struct drm_dp_aux *aux = &intel_dp->aux;
-	struct i915_psr *psr = &dev_priv->psr;
+	struct intel_psr *psr = &intel_dp->psr;
 	u8 val;
 	int r;
 
@@ -1884,7 +1931,7 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
 static void psr_capability_changed_check(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct i915_psr *psr = &dev_priv->psr;
+	struct intel_psr *psr = &intel_dp->psr;
 	u8 val;
 	int r;
 
@@ -1908,18 +1955,18 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
 void intel_psr_short_pulse(struct intel_dp *intel_dp)
 {
 	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-	struct i915_psr *psr = &dev_priv->psr;
+	struct intel_psr *psr = &intel_dp->psr;
 	u8 status, error_status;
 	const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
 			  DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
 			  DP_PSR_LINK_CRC_ERROR;
 
-	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+	if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
 		return;
 
 	mutex_lock(&psr->lock);
 
-	if (!psr->enabled || psr->dp != intel_dp)
+	if (!psr->enabled)
 		goto exit;
 
 	if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
@@ -1962,15 +2009,14 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
 
 bool intel_psr_enabled(struct intel_dp *intel_dp)
 {
-	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
 	bool ret;
 
-	if (!CAN_PSR(dev_priv) || !intel_dp_is_edp(intel_dp))
+	if (!CAN_PSR(intel_dp) || !intel_dp_is_edp(intel_dp))
 		return false;
 
-	mutex_lock(&dev_priv->psr.lock);
-	ret = (dev_priv->psr.dp == intel_dp && dev_priv->psr.enabled);
-	mutex_unlock(&dev_priv->psr.lock);
+	mutex_lock(&intel_dp->psr.lock);
+	ret = intel_dp->psr.enabled;
+	mutex_unlock(&intel_dp->psr.lock);
 
 	return ret;
 }
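
(Illustration only, not part of the patch.) The flush rework above replaces the
single device-wide PSR state with one instance per eDP encoder, so a frontbuffer
flush now walks every encoder and each instance only consumes the frontbuffer
bits that belong to its own pipe. A minimal standalone C sketch of that masking;
the struct and mask layout below are made up for illustration and merely stand
in for the i915 definitions:

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_PIPE			8
#define PIPE_FRONTBUFFER_MASK(pipe)	(0xffu << ((pipe) * BITS_PER_PIPE))

struct psr_instance {
	int pipe;			/* pipe this PSR instance drives */
	bool enabled;
	unsigned int busy_frontbuffer_bits;
};

/* Walk all instances; each one only consumes its own pipe's bits. */
static void psr_flush_all(struct psr_instance *psr, int count,
			  unsigned int frontbuffer_bits)
{
	for (int i = 0; i < count; i++) {
		unsigned int pipe_bits =
			frontbuffer_bits & PIPE_FRONTBUFFER_MASK(psr[i].pipe);

		if (!psr[i].enabled)
			continue;

		psr[i].busy_frontbuffer_bits &= ~pipe_bits;
		if (pipe_bits)
			printf("pipe %d: force HW tracking exit\n",
			       psr[i].pipe);
	}
}

int main(void)
{
	struct psr_instance psr[] = {
		{ .pipe = 0, .enabled = true, .busy_frontbuffer_bits = 0x01 },
		{ .pipe = 1, .enabled = true, .busy_frontbuffer_bits = 0x100 },
	};

	psr_flush_all(psr, 2, 0x03);	/* dirties pipe 0 only */
	return 0;
}
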
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 0a517978e8af..2753976acc26 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -18,7 +18,7 @@ struct intel_atomic_state;
 struct intel_plane_state;
 struct intel_plane;
 
-#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+#define CAN_PSR(intel_dp) (HAS_PSR(dp_to_i915(intel_dp)) && intel_dp->psr.sink_support)
 void intel_psr_init_dpcd(struct intel_dp *intel_dp);
 void intel_psr_enable(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *crtc_state,
@@ -28,20 +28,19 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 void intel_psr_update(struct intel_dp *intel_dp,
 		      const struct intel_crtc_state *crtc_state,
 		      const struct drm_connector_state *conn_state);
-int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
+int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
 void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits,
 			  enum fb_op_origin origin);
 void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits,
 		     enum fb_op_origin origin);
-void intel_psr_init(struct drm_i915_private *dev_priv);
+void intel_psr_init(struct intel_dp *intel_dp);
 void intel_psr_compute_config(struct intel_dp *intel_dp,
 			      struct intel_crtc_state *crtc_state);
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
 void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
-			    u32 *out_value);
+void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);
 bool intel_psr_enabled(struct intel_dp *intel_dp);
 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
 				struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 52120f56dc54..986028deffd2 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -96,7 +96,6 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	bool need_vlv_dsi_wa = (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
 		intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI);
 	DEFINE_WAIT(wait);
-	u32 psr_status;
 
 	if (new_crtc_state->uapi.async_flip)
 		return;
@@ -122,10 +121,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	 * VBL interrupts will start the PSR exit and prevent a PSR
 	 * re-entry as well.
 	 */
-	if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
-		drm_err(&dev_priv->drm,
-			"PSR idle timed out 0x%x, atomic update may fail\n",
-			psr_status);
+	intel_psr_wait_for_idle(new_crtc_state);
 
 	local_irq_disable();
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3edc9c4f2d21..3816b12aac0c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -478,42 +478,6 @@ struct i915_drrs {
 	enum drrs_support_type type;
 };
 
-struct i915_psr {
-	struct mutex lock;
-
-#define I915_PSR_DEBUG_MODE_MASK	0x0f
-#define I915_PSR_DEBUG_DEFAULT		0x00
-#define I915_PSR_DEBUG_DISABLE		0x01
-#define I915_PSR_DEBUG_ENABLE		0x02
-#define I915_PSR_DEBUG_FORCE_PSR1	0x03
-#define I915_PSR_DEBUG_IRQ		0x10
-
-	u32 debug;
-	bool sink_support;
-	bool enabled;
-	struct intel_dp *dp;
-	enum pipe pipe;
-	enum transcoder transcoder;
-	bool active;
-	struct work_struct work;
-	unsigned busy_frontbuffer_bits;
-	bool sink_psr2_support;
-	bool link_standby;
-	bool colorimetry_support;
-	bool psr2_enabled;
-	bool psr2_sel_fetch_enabled;
-	u8 sink_sync_latency;
-	ktime_t last_entry_attempt;
-	ktime_t last_exit;
-	bool sink_not_reliable;
-	bool irq_aux_error;
-	u16 su_x_granularity;
-	bool dc3co_enabled;
-	u32 dc3co_exit_delay;
-	struct delayed_work dc3co_work;
-	struct drm_dp_vsc_sdp vsc;
-};
-
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
 #define QUIRK_BACKLIGHT_PRESENT (1<<3)
@@ -1041,8 +1005,6 @@ struct drm_i915_private {
 
 	struct i915_power_domains power_domains;
 
-	struct i915_psr psr;
-
 	struct i915_gpu_error gpu_error;
 
 	struct drm_i915_gem_object *vlv_pctx;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a31980f69120..907a366e9c32 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2088,10 +2088,22 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
 		ivb_err_int_handler(dev_priv);
 
 	if (de_iir & DE_EDP_PSR_INT_HSW) {
-		u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR);
+		struct intel_encoder *encoder;
 
-		intel_psr_irq_handler(dev_priv, psr_iir);
-		intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir);
+		for_each_intel_dp(&dev_priv->drm, encoder) {
+			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+			if (encoder->type == INTEL_OUTPUT_EDP &&
+			    CAN_PSR(intel_dp)) {
+				u32 psr_iir = intel_uncore_read(&dev_priv->uncore,
+								EDP_PSR_IIR);
+
+				intel_psr_irq_handler(intel_dp, psr_iir);
+				intel_uncore_write(&dev_priv->uncore,
+						   EDP_PSR_IIR, psr_iir);
+				break;
+			}
+		}
 	}
 
 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
@@ -2301,21 +2313,33 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
 	}
 
 	if (iir & GEN8_DE_EDP_PSR) {
+		struct intel_encoder *encoder;
 		u32 psr_iir;
 		i915_reg_t iir_reg;
 
-		if (INTEL_GEN(dev_priv) >= 12)
-			iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder);
-		else
-			iir_reg = EDP_PSR_IIR;
+		for_each_intel_dp(&dev_priv->drm, encoder) {
+			struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+			if (!CAN_PSR(intel_dp))
+				continue;
+
+			if (INTEL_GEN(dev_priv) >= 12)
+				iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
+			else
+				iir_reg = EDP_PSR_IIR;
 
-		psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
-		intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
+			psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
+			intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
 
-		if (psr_iir)
-			found = true;
+			if (psr_iir)
+				found = true;
 
-		intel_psr_irq_handler(dev_priv, psr_iir);
+			intel_psr_irq_handler(intel_dp, psr_iir);
+
+			/* prior to GEN12 there is only one EDP PSR instance */
+			if (INTEL_GEN(dev_priv) < 12)
+				break;
+		}
 	}
 
 	if (!found)
-- 
2.30.0
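
(Illustration only, not part of the patch.) The i915_irq.c changes make the PSR
interrupt handler walk the PSR-capable encoders: on GEN12+ each transcoder has
its own PSR IIR register, while before GEN12 there is a single shared
EDP_PSR_IIR, so the walk stops after the first capable encoder. A simplified
standalone model of that dispatch, using stand-in helpers rather than the real
uncore accessors:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_dp {
	int transcoder;
	bool can_psr;
};

/* Stand-in for the MMIO read + clear of the (per-transcoder) PSR IIR. */
static uint32_t read_and_clear_psr_iir(int transcoder)
{
	printf("read+clear PSR IIR for transcoder %d\n", transcoder);
	return 0x1;	/* pretend an event was pending */
}

static void handle_psr_irq(struct fake_dp *dps, int count, int gen)
{
	for (int i = 0; i < count; i++) {
		if (!dps[i].can_psr)
			continue;

		/* GEN12+: per-transcoder IIR; older: one shared register. */
		int transcoder = gen >= 12 ? dps[i].transcoder : 0;
		uint32_t iir = read_and_clear_psr_iir(transcoder);

		if (iir)
			printf("dispatch PSR irq to encoder %d\n", i);

		if (gen < 12)
			break;	/* only one EDP PSR instance before GEN12 */
	}
}

int main(void)
{
	struct fake_dp dps[] = {
		{ .transcoder = 1, .can_psr = true },
		{ .transcoder = 2, .can_psr = true },
	};

	handle_psr_irq(dps, 2, 12);	/* walks both transcoders */
	handle_psr_irq(dps, 2, 11);	/* stops after the first */
	return 0;
}
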

_______________________________________________
Intel-gfx mailing list
Intel-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/intel-gfx




