Currently, the struct drm_dsc_config for DSI is populated at display
setup time during system boot. This mechanism works fine for embedded
displays but not for pluggable displays, since the struct
drm_dsc_config becomes stale once the external display is unplugged.
Move the storing of the DSI DSC struct to atomic_enable() so that the
same mechanism works for both embedded and pluggable displays.

Signed-off-by: Kuogee Hsieh <quic_khsieh@xxxxxxxxxxx>
---
 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 42 ++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 2e1873d..e00cd39 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -543,11 +543,24 @@ bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
 	return (num_dsc > 0) && (num_dsc > intf_count);
 }
 
+static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
+{
+	struct msm_drm_private *priv = drm_enc->dev->dev_private;
+	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+	int index = dpu_enc->disp_info.h_tile_instance[0];
+
+	if (dpu_enc->disp_info.intf_type == INTF_DSI)
+		return msm_dsi_get_dsc_config(priv->dsi[index]);
+
+	return NULL;
+}
+
 static struct msm_display_topology dpu_encoder_get_topology(
 			struct dpu_encoder_virt *dpu_enc,
 			struct dpu_kms *dpu_kms,
 			struct drm_display_mode *mode,
-			struct drm_crtc_state *crtc_state)
+			struct drm_crtc_state *crtc_state,
+			struct drm_dsc_config *dsc)
 {
 	struct msm_display_topology topology = {0};
 	int i, intf_count = 0;
@@ -579,7 +592,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
 
 	topology.num_intf = intf_count;
 
-	if (dpu_enc->dsc) {
+	if (dsc) {
 		/*
 		 * In case of Display Stream Compression (DSC), we would use
 		 * 2 DSC encoders, 2 layer mixers and 1 interface
@@ -605,6 +618,7 @@ static int dpu_encoder_virt_atomic_check(
 	struct drm_display_mode *adj_mode;
 	struct msm_display_topology topology;
 	struct dpu_global_state *global_state;
+	struct drm_dsc_config *dsc;
 	int i = 0;
 	int ret = 0;
 
@@ -640,7 +654,9 @@ static int dpu_encoder_virt_atomic_check(
 		}
 	}
 
-	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state);
+	dsc = dpu_encoder_get_dsc_config(drm_enc);
+
+	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
 
 	/*
 	 * Release and Allocate resources on every modeset
@@ -1072,14 +1088,12 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 		dpu_enc->hw_pp[i] = i < num_pp ?
 				to_dpu_hw_pingpong(hw_pp[i]) : NULL;
 
-	if (dpu_enc->dsc) {
-		num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-							drm_enc->base.id, DPU_HW_BLK_DSC,
-							hw_dsc, ARRAY_SIZE(hw_dsc));
-		for (i = 0; i < num_dsc; i++) {
-			dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
-			dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
-		}
+	num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+						drm_enc->base.id, DPU_HW_BLK_DSC,
+						hw_dsc, ARRAY_SIZE(hw_dsc));
+	for (i = 0; i < num_dsc; i++) {
+		dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
+		dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
 	}
 
 	dpu_enc->dsc_mask = dsc_mask;
@@ -1187,6 +1201,8 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
 
 	dpu_enc = to_dpu_encoder_virt(drm_enc);
 
+	dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
+
 	mutex_lock(&dpu_enc->enc_lock);
 	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
 
@@ -2109,8 +2125,10 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
 				phys_enc->hw_pp->merge_3d->idx);
 	}
 
-	if (dpu_enc->dsc)
+	if (dpu_enc->dsc) {
 		dpu_encoder_unprep_dsc(dpu_enc);
+		dpu_enc->dsc = NULL;
+	}
 
 	intf_cfg.stream_sel = 0;	/* Don't care value for video mode */
 	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
-- 
2.7.4