+ ctx->pending_flush_mask |= BIT(CDM_IDX);
+}
+
static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
enum dpu_dspp dspp, u32 dspp_sub_blk)
{
@@ -504,6 +524,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 intf_active = 0;
u32 wb_active = 0;
u32 mode_sel = 0;
+ u32 cdm_active = 0;
/* CTL_TOP[31:28] carries group_id to collate CTL paths
* per VM. Explicitly disable it until VM support is
@@ -517,6 +538,7 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
+ cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
if (cfg->intf)
intf_active |= BIT(cfg->intf - INTF_0);
@@ -534,6 +556,9 @@ static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
if (cfg->dsc)
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, cfg->dsc);
+
+ if (cfg->cdm)
+ DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}
static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
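[Editor's note] For context, on the CTL v1 path the new cfg->cdm field is programmed into CTL_CDM_ACTIVE alongside the interface and writeback selections above. A minimal caller-side sketch follows; the encoder helper context and the phys_enc/hw_cdm/ctl variable names are illustrative assumptions, not part of this patch:

    /* Illustrative only: attach a CDM block to the CTL path during modeset */
    struct dpu_hw_intf_cfg intf_cfg = { 0 };

    intf_cfg.intf = phys_enc->hw_intf ? phys_enc->hw_intf->idx : INTF_NONE;
    intf_cfg.cdm = hw_cdm->idx;              /* e.g. CDM_0 */
    intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;

    ctl->ops.setup_intf_cfg(ctl, &intf_cfg); /* dpu_hw_ctl_intf_cfg_v1 on CTL v1 */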
@@ -577,6 +602,7 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
u32 wb_active = 0;
u32 merge3d_active = 0;
u32 dsc_active;
+ u32 cdm_active;
/*
* This API resets each portion of the CTL path namely,
@@ -612,6 +638,12 @@ static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
dsc_active &= ~cfg->dsc;
DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
}
+
+ if (cfg->cdm) {
+ cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
+ cdm_active &= ~cfg->cdm;
+ DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
+ }
}
static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
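[Editor's note] On teardown, dpu_hw_ctl_reset_intf_cfg_v1() above clears only the bits named in the passed cfg, so a disable path can detach the CDM without disturbing other blocks still active on the same CTL. A hedged sketch of such a call (variable names are illustrative):

    /* Illustrative only: drop the CDM from the active CTL path on disable */
    struct dpu_hw_intf_cfg intf_cfg = { 0 };

    intf_cfg.cdm = hw_cdm->idx;              /* only CTL_CDM_ACTIVE is touched */
    ctl->ops.reset_intf_cfg(ctl, &intf_cfg);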
@@ -645,12 +677,14 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
ops->update_pending_flush_dsc =
dpu_hw_ctl_update_pending_flush_dsc_v1;
+ ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
} else {
ops->trigger_flush = dpu_hw_ctl_trigger_flush;
ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
ops->update_pending_flush_intf =
dpu_hw_ctl_update_pending_flush_intf;
ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
+ ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
}
ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
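[Editor's note] The ops wiring above mirrors the existing INTF/WB/DSC pattern: on CTL v1 the CDM flush is staged via the dedicated pending_cdm_flush_mask (added to the header below) and written out at trigger time, while the legacy callback ORs the CDM bit straight into pending_flush_mask. A hedged sketch of how an encoder's flush path might use it:

    /* Illustrative only: stage a CDM flush for the next CTL flush trigger */
    if (ctl->ops.update_pending_flush_cdm)
        ctl->ops.update_pending_flush_cdm(ctl);

    /* ...the commit path later writes the cached masks to hardware */
    ctl->ops.trigger_flush(ctl);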
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 1c242298ff2e..6dd44dfdfb61 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -39,6 +39,7 @@ struct dpu_hw_stage_cfg {
* @mode_3d: 3d mux configuration
* @merge_3d: 3d merge block used
* @intf_mode_sel: Interface mode, cmd / vid
+ * @cdm: CDM block used
* @stream_sel: Stream selection for multi-stream interfaces
* @dsc: DSC BIT masks used
*/
@@ -48,6 +49,7 @@ struct dpu_hw_intf_cfg {
enum dpu_3d_blend_mode mode_3d;
enum dpu_merge_3d merge_3d;
enum dpu_ctl_mode_sel intf_mode_sel;
+ enum dpu_cdm cdm;
int stream_sel;
unsigned int dsc;
};
@@ -166,6 +168,13 @@ struct dpu_hw_ctl_ops {
void (*update_pending_flush_dsc)(struct dpu_hw_ctl *ctx,
enum dpu_dsc blk);
+ /**
+ * OR in the CDM flush bit to the cached pending_(cdm_)flush_mask
+ * No effect on hardware
+ * @ctx: ctl path ctx pointer
+ */
+ void (*update_pending_flush_cdm)(struct dpu_hw_ctl *ctx);
+
/**
* Write the value of the pending_flush_mask to hardware
* @ctx : ctl path ctx pointer
@@ -239,6 +248,7 @@ struct dpu_hw_ctl_ops {
* @pending_intf_flush_mask: pending INTF flush
* @pending_wb_flush_mask: pending WB flush
* @pending_dsc_flush_mask: pending DSC flush
+ * @pending_cdm_flush_mask: pending CDM flush
* @ops: operation list
*/
struct dpu_hw_ctl {
@@ -256,6 +266,7 @@ struct dpu_hw_ctl {
u32 pending_merge_3d_flush_mask;
u32 pending_dspp_flush_mask[DSPP_MAX - DSPP_0];
u32 pending_dsc_flush_mask;
+ u32 pending_cdm_flush_mask;
/* ops */
struct dpu_hw_ctl_ops ops;
--
2.40.1
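[Editor's note] The hunk that consumes pending_cdm_flush_mask at flush-trigger time is not part of this excerpt. Conceptually it would follow the DSC handling in dpu_hw_ctl_trigger_flush_v1(); the register name and exact placement below are assumptions for illustration:

    /* Sketch of the consuming side, assuming a CTL_CDM_FLUSH register and a
     * CDM bit in the master flush mask, mirroring the existing DSC handling. */
    if (ctx->pending_flush_mask & BIT(CDM_IDX))
        DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
                      ctx->pending_cdm_flush_mask);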