From: Dmytro Laktyushkin <Dmytro.Laktyushkin@xxxxxxx>

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin at amd.com>
Reviewed-by: Dmytro Laktyushkin <Dmytro.Laktyushkin at amd.com>
Acked-by: Harry Wentland <harry.wentland at amd.com>
---
 drivers/gpu/drm/amd/display/dc/dc_helper.c         |  59 ++++++++
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c  | 153 ++++++++++++++++++++-
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h  |  19 +--
 .../drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c  | 114 ++++++++++++++-
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h       |  20 +++
 drivers/gpu/drm/amd/display/dc/inc/reg_helper.h    |  56 ++++++++
 6 files changed, 401 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 48e1fcf53d43..bd0fda0ceb91 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -117,6 +117,65 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
 	return reg_val;
 }
 
+uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
+	return reg_val;
+}
+
+uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+		uint8_t shift7, uint32_t mask7, uint32_t *field_value7)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
+	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
+	return reg_val;
+}
+
+uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
+		uint8_t shift8, uint32_t mask8, uint32_t *field_value8)
+{
+	uint32_t reg_val = dm_read_reg(ctx, addr);
+	*field_value1 = get_reg_field_value_ex(reg_val, mask1, shift1);
+	*field_value2 = get_reg_field_value_ex(reg_val, mask2, shift2);
+	*field_value3 = get_reg_field_value_ex(reg_val, mask3, shift3);
+	*field_value4 = get_reg_field_value_ex(reg_val, mask4, shift4);
+	*field_value5 = get_reg_field_value_ex(reg_val, mask5, shift5);
+	*field_value6 = get_reg_field_value_ex(reg_val, mask6, shift6);
+	*field_value7 = get_reg_field_value_ex(reg_val, mask7, shift7);
+	*field_value8 = get_reg_field_value_ex(reg_val, mask8, shift8);
+	return reg_val;
+}
 
 /* note: va version of this is pretty bad idea, since there is a output parameter pass by pointer
  * compiler won't be able to check for size match and is prone to stack corruption type of bugs
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 4ca9b6e9a824..58062172cf3f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -756,9 +756,159 @@ void min_set_viewport(
 			PRI_VIEWPORT_Y_START_C, viewport_c->y);
 }
 
-void hubp1_read_state(struct dcn10_hubp *hubp1,
+void hubp1_read_state(struct hubp *hubp,
 		struct dcn_hubp_state *s)
 {
+	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+	struct _vcs_dpi_display_dlg_regs_st *dlg_attr = &s->dlg_attr;
+	struct _vcs_dpi_display_ttu_regs_st *ttu_attr = &s->ttu_attr;
+	struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+	/* Requester */
+	REG_GET(HUBPRET_CONTROL,
+			DET_BUF_PLANE1_BASE_ADDRESS, &rq_regs->plane1_base_address);
+	REG_GET_4(DCN_EXPANSION_MODE,
+			DRQ_EXPANSION_MODE, &rq_regs->drq_expansion_mode,
+			PRQ_EXPANSION_MODE, &rq_regs->prq_expansion_mode,
+			MRQ_EXPANSION_MODE, &rq_regs->mrq_expansion_mode,
+			CRQ_EXPANSION_MODE, &rq_regs->crq_expansion_mode);
+	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG,
+			CHUNK_SIZE, &rq_regs->rq_regs_l.chunk_size,
+			MIN_CHUNK_SIZE, &rq_regs->rq_regs_l.min_chunk_size,
+			META_CHUNK_SIZE, &rq_regs->rq_regs_l.meta_chunk_size,
+			MIN_META_CHUNK_SIZE, &rq_regs->rq_regs_l.min_meta_chunk_size,
+			DPTE_GROUP_SIZE, &rq_regs->rq_regs_l.dpte_group_size,
+			MPTE_GROUP_SIZE, &rq_regs->rq_regs_l.mpte_group_size,
+			SWATH_HEIGHT, &rq_regs->rq_regs_l.swath_height,
+			PTE_ROW_HEIGHT_LINEAR, &rq_regs->rq_regs_l.pte_row_height_linear);
+	REG_GET_8(DCHUBP_REQ_SIZE_CONFIG_C,
+			CHUNK_SIZE_C, &rq_regs->rq_regs_c.chunk_size,
+			MIN_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_chunk_size,
+			META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.meta_chunk_size,
+			MIN_META_CHUNK_SIZE_C, &rq_regs->rq_regs_c.min_meta_chunk_size,
+			DPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.dpte_group_size,
+			MPTE_GROUP_SIZE_C, &rq_regs->rq_regs_c.mpte_group_size,
+			SWATH_HEIGHT_C, &rq_regs->rq_regs_c.swath_height,
+			PTE_ROW_HEIGHT_LINEAR_C, &rq_regs->rq_regs_c.pte_row_height_linear);
+
+	/* DLG - Per hubp */
+	REG_GET_2(BLANK_OFFSET_0,
+		REFCYC_H_BLANK_END, &dlg_attr->refcyc_h_blank_end,
+		DLG_V_BLANK_END, &dlg_attr->dlg_vblank_end);
+
+	REG_GET(BLANK_OFFSET_1,
+		MIN_DST_Y_NEXT_START, &dlg_attr->min_dst_y_next_start);
+
+	REG_GET(DST_DIMENSIONS,
+		REFCYC_PER_HTOTAL, &dlg_attr->refcyc_per_htotal);
+
+	REG_GET_2(DST_AFTER_SCALER,
+		REFCYC_X_AFTER_SCALER, &dlg_attr->refcyc_x_after_scaler,
+		DST_Y_AFTER_SCALER, &dlg_attr->dst_y_after_scaler);
+
+	if (REG(PREFETCH_SETTINS))
+		REG_GET_2(PREFETCH_SETTINS,
+			DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+			VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+	else
+		REG_GET_2(PREFETCH_SETTINGS,
+			DST_Y_PREFETCH, &dlg_attr->dst_y_prefetch,
+			VRATIO_PREFETCH, &dlg_attr->vratio_prefetch);
+
+	REG_GET_2(VBLANK_PARAMETERS_0,
+		DST_Y_PER_VM_VBLANK, &dlg_attr->dst_y_per_vm_vblank,
+		DST_Y_PER_ROW_VBLANK, &dlg_attr->dst_y_per_row_vblank);
+
+	REG_GET(REF_FREQ_TO_PIX_FREQ,
+		REF_FREQ_TO_PIX_FREQ, &dlg_attr->ref_freq_to_pix_freq);
+
+	/* DLG - Per luma/chroma */
+	REG_GET(VBLANK_PARAMETERS_1,
+		REFCYC_PER_PTE_GROUP_VBLANK_L, &dlg_attr->refcyc_per_pte_group_vblank_l);
+
+	REG_GET(VBLANK_PARAMETERS_3,
+		REFCYC_PER_META_CHUNK_VBLANK_L, &dlg_attr->refcyc_per_meta_chunk_vblank_l);
+
+	if (REG(NOM_PARAMETERS_0))
+		REG_GET(NOM_PARAMETERS_0,
+			DST_Y_PER_PTE_ROW_NOM_L, &dlg_attr->dst_y_per_pte_row_nom_l);
+
+	if (REG(NOM_PARAMETERS_1))
+		REG_GET(NOM_PARAMETERS_1,
+			REFCYC_PER_PTE_GROUP_NOM_L, &dlg_attr->refcyc_per_pte_group_nom_l);
+
+	REG_GET(NOM_PARAMETERS_4,
+		DST_Y_PER_META_ROW_NOM_L, &dlg_attr->dst_y_per_meta_row_nom_l);
+
+	REG_GET(NOM_PARAMETERS_5,
+		REFCYC_PER_META_CHUNK_NOM_L, &dlg_attr->refcyc_per_meta_chunk_nom_l);
+
+	REG_GET_2(PER_LINE_DELIVERY_PRE,
+		REFCYC_PER_LINE_DELIVERY_PRE_L, &dlg_attr->refcyc_per_line_delivery_pre_l,
+		REFCYC_PER_LINE_DELIVERY_PRE_C, &dlg_attr->refcyc_per_line_delivery_pre_c);
+
+	REG_GET_2(PER_LINE_DELIVERY,
+		REFCYC_PER_LINE_DELIVERY_L, &dlg_attr->refcyc_per_line_delivery_l,
+		REFCYC_PER_LINE_DELIVERY_C, &dlg_attr->refcyc_per_line_delivery_c);
+
+	if (REG(PREFETCH_SETTINS_C))
+		REG_GET(PREFETCH_SETTINS_C,
+			VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+	else
+		REG_GET(PREFETCH_SETTINGS_C,
+			VRATIO_PREFETCH_C, &dlg_attr->vratio_prefetch_c);
+
+	REG_GET(VBLANK_PARAMETERS_2,
+		REFCYC_PER_PTE_GROUP_VBLANK_C, &dlg_attr->refcyc_per_pte_group_vblank_c);
+
+	REG_GET(VBLANK_PARAMETERS_4,
+		REFCYC_PER_META_CHUNK_VBLANK_C, &dlg_attr->refcyc_per_meta_chunk_vblank_c);
+
+	if (REG(NOM_PARAMETERS_2))
+		REG_GET(NOM_PARAMETERS_2,
+			DST_Y_PER_PTE_ROW_NOM_C, &dlg_attr->dst_y_per_pte_row_nom_c);
+
+	if (REG(NOM_PARAMETERS_3))
+		REG_GET(NOM_PARAMETERS_3,
+			REFCYC_PER_PTE_GROUP_NOM_C, &dlg_attr->refcyc_per_pte_group_nom_c);
+
+	REG_GET(NOM_PARAMETERS_6,
+		DST_Y_PER_META_ROW_NOM_C, &dlg_attr->dst_y_per_meta_row_nom_c);
+
+	REG_GET(NOM_PARAMETERS_7,
+		REFCYC_PER_META_CHUNK_NOM_C, &dlg_attr->refcyc_per_meta_chunk_nom_c);
+
+	/* TTU - per hubp */
+	REG_GET_2(DCN_TTU_QOS_WM,
+		QoS_LEVEL_LOW_WM, &ttu_attr->qos_level_low_wm,
+		QoS_LEVEL_HIGH_WM, &ttu_attr->qos_level_high_wm);
+
+	REG_GET_2(DCN_GLOBAL_TTU_CNTL,
+		MIN_TTU_VBLANK, &ttu_attr->min_ttu_vblank,
+		QoS_LEVEL_FLIP, &ttu_attr->qos_level_flip);
+
+	/* TTU - per luma/chroma */
+	/* Assumed surf0 is luma and 1 is chroma */
+
+	REG_GET_3(DCN_SURF0_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_l,
+		QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_l,
+		QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_l);
+
+	REG_GET(DCN_SURF0_TTU_CNTL1,
+		REFCYC_PER_REQ_DELIVERY_PRE,
+		&ttu_attr->refcyc_per_req_delivery_pre_l);
+
+	REG_GET_3(DCN_SURF1_TTU_CNTL0,
+		REFCYC_PER_REQ_DELIVERY, &ttu_attr->refcyc_per_req_delivery_c,
+		QoS_LEVEL_FIXED, &ttu_attr->qos_level_fixed_c,
+		QoS_RAMP_DISABLE, &ttu_attr->qos_ramp_disable_c);
+
+	REG_GET(DCN_SURF1_TTU_CNTL1,
+		REFCYC_PER_REQ_DELIVERY_PRE,
+		&ttu_attr->refcyc_per_req_delivery_pre_c);
+
+	/* Rest of hubp */
 	REG_GET(DCSURF_SURFACE_CONFIG,
 			SURFACE_PIXEL_FORMAT, &s->pixel_format);
 
@@ -956,6 +1106,7 @@ static struct hubp_funcs dcn10_hubp_funcs = {
 	.hubp_disconnect = hubp1_disconnect,
 	.hubp_clk_cntl = hubp1_clk_cntl,
 	.hubp_vtg_sel = hubp1_vtg_sel,
+	.hubp_read_state = hubp1_read_state,
 };
 
 /*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index e0d6d32357c0..920ae3a1b412 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -690,24 +690,7 @@ void dcn10_hubp_construct(
 	const struct dcn_mi_shift *hubp_shift,
 	const struct dcn_mi_mask *hubp_mask);
 
-
-struct dcn_hubp_state {
-	uint32_t pixel_format;
-	uint32_t inuse_addr_hi;
-	uint32_t viewport_width;
-	uint32_t viewport_height;
-	uint32_t rotation_angle;
-	uint32_t h_mirror_en;
-	uint32_t sw_mode;
-	uint32_t dcc_en;
-	uint32_t blank_en;
-	uint32_t underflow_status;
-	uint32_t ttu_disable;
-	uint32_t min_ttu_vblank;
-	uint32_t qos_level_low_wm;
-	uint32_t qos_level_high_wm;
-};
-void hubp1_read_state(struct dcn10_hubp *hubp1,
+void hubp1_read_state(struct hubp *hubp,
 		struct dcn_hubp_state *s);
 
 enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1f59b27e611a..c9d4e96084b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -112,6 +112,104 @@ void dcn10_log_hubbub_state(struct dc *dc)
 	DTN_INFO("\n");
 }
 
+static void print_rq_dlg_ttu_regs(struct dc_context *dc_ctx, struct dcn_hubp_state *s)
+{
+	struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
+	struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
+	struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
+
+	DTN_INFO("========Requester========\n");
+	DTN_INFO("drq_expansion_mode = 0x%0x\n", rq_regs->drq_expansion_mode);
+	DTN_INFO("prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode);
+	DTN_INFO("mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode);
+	DTN_INFO("crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode);
+	DTN_INFO("plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address);
+	DTN_INFO("==<LUMA>==\n");
+	DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_l.chunk_size);
+	DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_chunk_size);
+	DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.meta_chunk_size);
+	DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_l.min_meta_chunk_size);
+	DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.dpte_group_size);
+	DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_l.mpte_group_size);
+	DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_l.swath_height);
+	DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_l.pte_row_height_linear);
+	DTN_INFO("==<CHROMA>==\n");
+	DTN_INFO("chunk_size = 0x%0x\n", rq_regs->rq_regs_c.chunk_size);
+	DTN_INFO("min_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_chunk_size);
+	DTN_INFO("meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.meta_chunk_size);
+	DTN_INFO("min_meta_chunk_size = 0x%0x\n", rq_regs->rq_regs_c.min_meta_chunk_size);
+	DTN_INFO("dpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.dpte_group_size);
+	DTN_INFO("mpte_group_size = 0x%0x\n", rq_regs->rq_regs_c.mpte_group_size);
+	DTN_INFO("swath_height = 0x%0x\n", rq_regs->rq_regs_c.swath_height);
+	DTN_INFO("pte_row_height_linear = 0x%0x\n", rq_regs->rq_regs_c.pte_row_height_linear);
+
+	DTN_INFO("========DLG========\n");
+	DTN_INFO("refcyc_h_blank_end = 0x%0x\n", dlg_regs->refcyc_h_blank_end);
+	DTN_INFO("dlg_vblank_end = 0x%0x\n", dlg_regs->dlg_vblank_end);
+	DTN_INFO("min_dst_y_next_start = 0x%0x\n", dlg_regs->min_dst_y_next_start);
+	DTN_INFO("refcyc_per_htotal = 0x%0x\n", dlg_regs->refcyc_per_htotal);
+	DTN_INFO("refcyc_x_after_scaler = 0x%0x\n", dlg_regs->refcyc_x_after_scaler);
+	DTN_INFO("dst_y_after_scaler = 0x%0x\n", dlg_regs->dst_y_after_scaler);
+	DTN_INFO("dst_y_prefetch = 0x%0x\n", dlg_regs->dst_y_prefetch);
+	DTN_INFO("dst_y_per_vm_vblank = 0x%0x\n", dlg_regs->dst_y_per_vm_vblank);
+	DTN_INFO("dst_y_per_row_vblank = 0x%0x\n", dlg_regs->dst_y_per_row_vblank);
+	DTN_INFO("dst_y_per_vm_flip = 0x%0x\n", dlg_regs->dst_y_per_vm_flip);
+	DTN_INFO("dst_y_per_row_flip = 0x%0x\n", dlg_regs->dst_y_per_row_flip);
+	DTN_INFO("ref_freq_to_pix_freq = 0x%0x\n", dlg_regs->ref_freq_to_pix_freq);
+	DTN_INFO("vratio_prefetch = 0x%0x\n", dlg_regs->vratio_prefetch);
+	DTN_INFO("vratio_prefetch_c = 0x%0x\n", dlg_regs->vratio_prefetch_c);
+	DTN_INFO("refcyc_per_pte_group_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_l);
+	DTN_INFO("refcyc_per_pte_group_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_vblank_c);
+	DTN_INFO("refcyc_per_meta_chunk_vblank_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_l);
+	DTN_INFO("refcyc_per_meta_chunk_vblank_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_vblank_c);
+	DTN_INFO("refcyc_per_pte_group_flip_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_l);
+	DTN_INFO("refcyc_per_pte_group_flip_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_flip_c);
+	DTN_INFO("refcyc_per_meta_chunk_flip_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_l);
+	DTN_INFO("refcyc_per_meta_chunk_flip_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_flip_c);
+	DTN_INFO("dst_y_per_pte_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_l);
+	DTN_INFO("dst_y_per_pte_row_nom_c = 0x%0x\n", dlg_regs->dst_y_per_pte_row_nom_c);
+	DTN_INFO("refcyc_per_pte_group_nom_l = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_l);
+	DTN_INFO("refcyc_per_pte_group_nom_c = 0x%0x\n", dlg_regs->refcyc_per_pte_group_nom_c);
+	DTN_INFO("dst_y_per_meta_row_nom_l = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_l);
+	DTN_INFO("dst_y_per_meta_row_nom_c = 0x%0x\n", dlg_regs->dst_y_per_meta_row_nom_c);
+	DTN_INFO("refcyc_per_meta_chunk_nom_l = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_l);
+	DTN_INFO("refcyc_per_meta_chunk_nom_c = 0x%0x\n", dlg_regs->refcyc_per_meta_chunk_nom_c);
+	DTN_INFO("refcyc_per_line_delivery_pre_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_l);
+	DTN_INFO("refcyc_per_line_delivery_pre_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_pre_c);
+	DTN_INFO("refcyc_per_line_delivery_l = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_l);
+	DTN_INFO("refcyc_per_line_delivery_c = 0x%0x\n", dlg_regs->refcyc_per_line_delivery_c);
+	DTN_INFO("chunk_hdl_adjust_cur0 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur0);
+	DTN_INFO("dst_y_offset_cur1 = 0x%0x\n", dlg_regs->dst_y_offset_cur1);
+	DTN_INFO("chunk_hdl_adjust_cur1 = 0x%0x\n", dlg_regs->chunk_hdl_adjust_cur1);
+	DTN_INFO("vready_after_vcount0 = 0x%0x\n", dlg_regs->vready_after_vcount0);
+	DTN_INFO("dst_y_delta_drq_limit = 0x%0x\n", dlg_regs->dst_y_delta_drq_limit);
+	DTN_INFO("xfc_reg_transfer_delay = 0x%0x\n", dlg_regs->xfc_reg_transfer_delay);
+	DTN_INFO("xfc_reg_precharge_delay = 0x%0x\n", dlg_regs->xfc_reg_precharge_delay);
+	DTN_INFO("xfc_reg_remote_surface_flip_latency = 0x%0x\n", dlg_regs->xfc_reg_remote_surface_flip_latency);
+
+	DTN_INFO("========TTU========\n");
+	DTN_INFO("qos_level_low_wm = 0x%0x\n", ttu_regs->qos_level_low_wm);
+	DTN_INFO("qos_level_high_wm = 0x%0x\n", ttu_regs->qos_level_high_wm);
+	DTN_INFO("min_ttu_vblank = 0x%0x\n", ttu_regs->min_ttu_vblank);
+	DTN_INFO("qos_level_flip = 0x%0x\n", ttu_regs->qos_level_flip);
+	DTN_INFO("refcyc_per_req_delivery_pre_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_l);
+	DTN_INFO("refcyc_per_req_delivery_l = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_l);
+	DTN_INFO("refcyc_per_req_delivery_pre_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_c);
+	DTN_INFO("refcyc_per_req_delivery_c = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_c);
+	DTN_INFO("refcyc_per_req_delivery_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur0);
+	DTN_INFO("refcyc_per_req_delivery_pre_cur0 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur0);
+	DTN_INFO("refcyc_per_req_delivery_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_cur1);
+	DTN_INFO("refcyc_per_req_delivery_pre_cur1 = 0x%0x\n", ttu_regs->refcyc_per_req_delivery_pre_cur1);
+	DTN_INFO("qos_level_fixed_l = 0x%0x\n", ttu_regs->qos_level_fixed_l);
+	DTN_INFO("qos_ramp_disable_l = 0x%0x\n", ttu_regs->qos_ramp_disable_l);
+	DTN_INFO("qos_level_fixed_c = 0x%0x\n", ttu_regs->qos_level_fixed_c);
+	DTN_INFO("qos_ramp_disable_c = 0x%0x\n", ttu_regs->qos_ramp_disable_c);
+	DTN_INFO("qos_level_fixed_cur0 = 0x%0x\n", ttu_regs->qos_level_fixed_cur0);
+	DTN_INFO("qos_ramp_disable_cur0 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur0);
+	DTN_INFO("qos_level_fixed_cur1 = 0x%0x\n", ttu_regs->qos_level_fixed_cur1);
+	DTN_INFO("qos_ramp_disable_cur1 = 0x%0x\n", ttu_regs->qos_ramp_disable_cur1);
+}
+
 void dcn10_log_hw_state(struct dc *dc)
 {
 	struct dc_context *dc_ctx = dc->ctx;
@@ -129,7 +227,7 @@ void dcn10_log_hw_state(struct dc *dc)
 		struct hubp *hubp = pool->hubps[i];
 		struct dcn_hubp_state s;
 
-		hubp1_read_state(TO_DCN10_HUBP(hubp), &s);
+		hubp->funcs->hubp_read_state(hubp, &s);
 
 		DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh"
 				" %6d %8d %7d %8xh",
@@ -201,6 +299,20 @@ void dcn10_log_hw_state(struct dc *dc)
 	}
 	DTN_INFO("\n");
 
+	for (i = 0; i < pool->pipe_count; i++) {
+		struct hubp *hubp = pool->hubps[i];
+		struct dcn_hubp_state s = {0};
+
+		if (!dc->current_state->res_ctx.pipe_ctx[i].stream)
+			continue;
+
+		hubp->funcs->hubp_read_state(hubp, &s);
+		DTN_INFO("RQ-DLG-TTU registers for HUBP%d:\n", i);
+		print_rq_dlg_ttu_regs(dc_ctx, &s);
+		DTN_INFO("\n");
+	}
+	DTN_INFO("\n");
+
 	log_mpc_crc(dc);
 
 	DTN_INFO_END();
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 9ced254e652c..3866147fb02a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -56,6 +56,25 @@ struct hubp {
 	bool power_gated;
 };
 
+struct dcn_hubp_state {
+	struct _vcs_dpi_display_dlg_regs_st dlg_attr;
+	struct _vcs_dpi_display_ttu_regs_st ttu_attr;
+	struct _vcs_dpi_display_rq_regs_st rq_regs;
+	uint32_t pixel_format;
+	uint32_t inuse_addr_hi;
+	uint32_t viewport_width;
+	uint32_t viewport_height;
+	uint32_t rotation_angle;
+	uint32_t h_mirror_en;
+	uint32_t sw_mode;
+	uint32_t dcc_en;
+	uint32_t blank_en;
+	uint32_t underflow_status;
+	uint32_t ttu_disable;
+	uint32_t min_ttu_vblank;
+	uint32_t qos_level_low_wm;
+	uint32_t qos_level_high_wm;
+};
 struct hubp_funcs {
 	void (*hubp_setup)(
@@ -121,6 +140,7 @@ struct hubp_funcs {
 	void (*hubp_clk_cntl)(struct hubp *hubp,
 			bool enable);
 	void (*hubp_vtg_sel)(struct hubp *hubp,
 			uint32_t otg_inst);
+	void (*hubp_read_state)(struct hubp *hubp, struct dcn_hubp_state *s);
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index 77eb72874e90..3306e7b0b3e3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -183,6 +183,36 @@
 		FN(reg_name, f4), v4, \
 		FN(reg_name, f5), v5)
 
+#define REG_GET_6(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6) \
+		generic_reg_get6(CTX, REG(reg_name), \
+				FN(reg_name, f1), v1, \
+				FN(reg_name, f2), v2, \
+				FN(reg_name, f3), v3, \
+				FN(reg_name, f4), v4, \
+				FN(reg_name, f5), v5, \
+				FN(reg_name, f6), v6)
+
+#define REG_GET_7(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7) \
+		generic_reg_get7(CTX, REG(reg_name), \
+				FN(reg_name, f1), v1, \
+				FN(reg_name, f2), v2, \
+				FN(reg_name, f3), v3, \
+				FN(reg_name, f4), v4, \
+				FN(reg_name, f5), v5, \
+				FN(reg_name, f6), v6, \
+				FN(reg_name, f7), v7)
+
+#define REG_GET_8(reg_name, f1, v1, f2, v2, f3, v3, f4, v4, f5, v5, f6, v6, f7, v7, f8, v8) \
+		generic_reg_get8(CTX, REG(reg_name), \
+				FN(reg_name, f1), v1, \
+				FN(reg_name, f2), v2, \
+				FN(reg_name, f3), v3, \
+				FN(reg_name, f4), v4, \
+				FN(reg_name, f5), v5, \
+				FN(reg_name, f6), v6, \
+				FN(reg_name, f7), v7, \
+				FN(reg_name, f8), v8)
+
 /* macro to poll and wait for a register field to read back given value */
 
 #define REG_WAIT(reg_name, field, val, delay_between_poll_us, max_try) \
@@ -389,4 +419,30 @@ uint32_t generic_reg_get5(const struct dc_context *ctx, uint32_t addr,
 		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
 		uint8_t shift5, uint32_t mask5, uint32_t *field_value5);
 
+uint32_t generic_reg_get6(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6);
+
+uint32_t generic_reg_get7(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+		uint8_t shift7, uint32_t mask7, uint32_t *field_value7);
+
+uint32_t generic_reg_get8(const struct dc_context *ctx, uint32_t addr,
+		uint8_t shift1, uint32_t mask1, uint32_t *field_value1,
+		uint8_t shift2, uint32_t mask2, uint32_t *field_value2,
+		uint8_t shift3, uint32_t mask3, uint32_t *field_value3,
+		uint8_t shift4, uint32_t mask4, uint32_t *field_value4,
+		uint8_t shift5, uint32_t mask5, uint32_t *field_value5,
+		uint8_t shift6, uint32_t mask6, uint32_t *field_value6,
+		uint8_t shift7, uint32_t mask7, uint32_t *field_value7,
+		uint8_t shift8, uint32_t mask8, uint32_t *field_value8);
 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_REG_HELPER_H_ */
-- 
2.15.1
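
For readers coming to this patch cold: each REG_GET_N(reg, f1, v1, ..., fN, vN) macro performs a single dm_read_reg() and decodes N fields from that one read value, with FN(reg_name, field) expanding to the field's shift and mask. Below is a minimal standalone sketch of the decode semantics; the register layout in it is hypothetical, chosen only to illustrate the mask-then-shift behaviour of get_reg_field_value_ex() as used in the functions above.

#include <stdint.h>
#include <stdio.h>

/* Standalone model of the per-field decode done by generic_reg_get6/7/8:
 * mask selects the field's bits, shift right-aligns them. */
static uint32_t get_field(uint32_t reg_val, uint32_t mask, uint8_t shift)
{
	return (reg_val & mask) >> shift;
}

int main(void)
{
	/* Hypothetical packed register: bits 7:0 = chunk size,
	 * bits 19:16 = swath height. */
	uint32_t reg_val = 0x00040080;

	printf("chunk_size = 0x%x\n", get_field(reg_val, 0x000000ff, 0));   /* 0x80 */
	printf("swath_height = 0x%x\n", get_field(reg_val, 0x000f0000, 16)); /* 0x4 */
	return 0;
}

Decoding all fields from one read is presumably why the REG_GET_8 variant is added here: hubp1_read_state() costs one MMIO read per register regardless of how many fields it extracts, which keeps the debug-log path cheap.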