From: Chris Park <chris.park@xxxxxxx>

[Why]
When the screen is static and displayed from MALL, the cursor needs to
be cached in MALL if the cursor exceeds 64x64 in size.

[How]
Program the bit that caches the cursor in MALL when the cursor size
exceeds 64x64.

Reviewed-by: Jun Lei <Jun.Lei@xxxxxxx>
Acked-by: Alan Liu <HaoPing.Liu@xxxxxxx>
Acked-by: Alex Hung <alex.hung@xxxxxxx>
Signed-off-by: Chris Park <chris.park@xxxxxxx>
---
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c  | 4 ++--
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h  | 2 +-
 drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 9 +++++++--
 drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h       | 2 +-
 4 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 3176b04a7740..6ec1c52535b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -47,13 +47,13 @@ void hubp32_update_force_pstate_disallow(struct hubp *hubp, bool pstate_disallow
 			DATA_UCLK_PSTATE_FORCE_VALUE, 0);
 }
 
-void hubp32_update_mall_sel(struct hubp *hubp, uint32_t mall_sel)
+void hubp32_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
 {
 	struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
 
 	// Also cache cursor in MALL if using MALL for SS
 	REG_UPDATE_2(DCHUBP_MALL_CONFIG, USE_MALL_SEL, mall_sel,
-			USE_MALL_FOR_CURSOR, mall_sel == 2 ? 1 : 0);
+			USE_MALL_FOR_CURSOR, c_cursor);
 }
 
 void hubp32_prepare_subvp_buffering(struct hubp *hubp, bool enable)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
index c4315d50fbb0..56ef71151536 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.h
@@ -52,7 +52,7 @@ void hubp32_update_force_pstate_disallow(struct hubp *hubp,
 		bool pstate_disallow);
 
-void hubp32_update_mall_sel(struct hubp *hubp, uint32_t mall_sel);
+void hubp32_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
 
 void hubp32_prepare_subvp_buffering(struct hubp *hubp, bool enable);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index bf9ac9dfc7dd..1f845e9ac406 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -670,18 +670,23 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
 {
 	int i;
 	unsigned int num_ways = dcn32_calculate_cab_allocation(dc, context);
+	bool cache_cursor = false;
 
 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
 		struct hubp *hubp = pipe->plane_res.hubp;
 
 		if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
+			if (hubp->curs_attr.width * hubp->curs_attr.height * 4 > 16384)
+				cache_cursor = true;
+
 			if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
-				hubp->funcs->hubp_update_mall_sel(hubp, 1);
+				hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
 			} else {
 				hubp->funcs->hubp_update_mall_sel(hubp,
 					num_ways <= dc->caps.cache_num_ways &&
-					pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED ? 2 : 0);
+					pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED ? 2 : 0,
+							cache_cursor);
 			}
 		}
 	}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 906818e792dd..44c4578193a3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -198,7 +198,7 @@ struct hubp_funcs {
 	void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
 
 	void (*hubp_update_force_pstate_disallow)(struct hubp *hubp, bool allow);
-	void (*hubp_update_mall_sel)(struct hubp *hubp, uint32_t mall_sel);
+	void (*hubp_update_mall_sel)(struct hubp *hubp, uint32_t mall_sel, bool c_cursor);
 	void (*hubp_prepare_subvp_buffering)(struct hubp *hubp, bool enable);
 	void (*hubp_set_flip_int)(struct hubp *hubp);
-- 
2.37.1
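
For readers outside the DC codebase: the condition added in
dcn32_update_mall_sel() reduces to "cache the cursor in MALL once its
surface is larger than a 64x64 cursor at 4 bytes per pixel", i.e.
64 * 64 * 4 = 16384 bytes. Below is a minimal standalone sketch of that
check (not part of the patch, not a DC API), assuming a 32bpp cursor and
a hypothetical helper name:

/*
 * Illustration only: mirrors the hunk's hard-coded size check.
 * cursor_needs_mall_cache() is a hypothetical helper for this sketch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool cursor_needs_mall_cache(uint32_t width, uint32_t height)
{
	/* 4 bytes per pixel assumed, matching the patch's "* 4 > 16384" test */
	return (uint64_t)width * height * 4 > 16384;
}

int main(void)
{
	/* 64x64 fits the threshold exactly, so it is not cached (0) */
	printf("64x64:   %d\n", cursor_needs_mall_cache(64, 64));
	/* 128x128 exceeds it, so it would be cached in MALL (1) */
	printf("128x128: %d\n", cursor_needs_mall_cache(128, 128));
	return 0;
}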