From: Alvin Lee <alvin.lee2@xxxxxxx>

For FPO and SubVP/DRR cases we need to ensure OTG_V_TOTAL_MIN/MAX_SEL is
programmed; otherwise stretching the vblank in FPO / SubVP / DRR cases
will have no effect and we could hit underflow / corruption.

Reviewed-by: Aric Cyr <aric.cyr@xxxxxxx>
Acked-by: Rodrigo Siqueira <rodrigo.siqueira@xxxxxxx>
Signed-off-by: Alvin Lee <alvin.lee2@xxxxxxx>
---
 .../gpu/drm/amd/display/dc/core/dc_resource.c | 14 +++++++
 .../display/dc/dcn32/dcn32_resource_helpers.c | 14 -------
 .../drm/amd/display/dc/dml/dcn32/dcn32_fpu.c  | 11 +++---
 .../amd/display/dc/hwss/dcn20/dcn20_hwseq.c   | 37 +++++++++++++++++++
 drivers/gpu/drm/amd/display/dc/inc/resource.h |  3 ++
 .../dc/resource/dcn32/dcn32_resource.c        |  2 +-
 .../dc/resource/dcn32/dcn32_resource.h        |  3 --
 .../dc/resource/dcn321/dcn321_resource.c      |  2 +-
 8 files changed, 62 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index f2abc1096ffb..57f0ddd15923 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -4986,6 +4986,20 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
 	return DC_OK;
 }
 
+bool resource_subvp_in_use(struct dc *dc,
+		struct dc_state *context)
+{
+	uint32_t i;
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
+			return true;
+	}
+	return false;
+}
+
 bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream)
 {
 	if (!dc->debug.disable_subvp_high_refresh && is_subvp_high_refresh_candidate(stream))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index f698a7e63291..44ba1edb7f53 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -183,20 +183,6 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
 	return true;
 }
 
-bool dcn32_subvp_in_use(struct dc *dc,
-		struct dc_state *context)
-{
-	uint32_t i;
-
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
-		if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
-			return true;
-	}
-	return false;
-}
-
 bool dcn32_mpo_in_use(struct dc_state *context)
 {
 	uint32_t i;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 9f37f717a1f8..aa68d010cbfd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -33,6 +33,7 @@
 #include "dcn30/dcn30_resource.h"
 #include "link.h"
 #include "dc_state_priv.h"
+#include "resource.h"
 
 #define DC_LOGGER_INIT(logger)
 
@@ -291,7 +292,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc,
 
 		/* for subvp + DRR case, if subvp pipes are still present we support pstate */
 		if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported &&
-				dcn32_subvp_in_use(dc, context))
+				resource_subvp_in_use(dc, context))
 			vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support;
 
 		if (vlevel < context->bw_ctx.dml.vba.soc.num_states &&
@@ -2272,7 +2273,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	unsigned int dummy_latency_index = 0;
 	int maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb;
 	unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
-	bool subvp_in_use = dcn32_subvp_in_use(dc, context);
+	bool subvp_active = resource_subvp_in_use(dc, context);
 	unsigned int min_dram_speed_mts_margin;
 	bool need_fclk_lat_as_dummy = false;
 	bool is_subvp_p_drr = false;
@@ -2281,7 +2282,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 	dc_assert_fp_enabled();
 
 	/* need to find dummy latency index for subvp */
-	if (subvp_in_use) {
+	if (subvp_active) {
 		/* Override DRAMClockChangeSupport for SubVP + DRR case where the DRR cannot switch without stretching it's VBLANK */
 		if (!pstate_en) {
 			context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp;
@@ -2467,7 +2468,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
 			dc->clk_mgr->bw_params->clk_table.entries[min_dram_speed_mts_offset].memclk_mhz * 16;
 		}
 
-	if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_in_use) {
+	if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && !subvp_active) {
 		/* find largest table entry that is lower than dram speed,
 		 * but lower than DPM0 still uses DPM0
 		 */
@@ -3527,7 +3528,7 @@ void dcn32_set_clock_limits(const struct _vcs_dpi_soc_bounding_box_st *soc_bb)
 void dcn32_override_min_req_memclk(struct dc *dc, struct dc_state *context)
 {
 	// WA: restrict FPO and SubVP to use first non-strobe mode (DCN32 BW issue)
-	if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dcn32_subvp_in_use(dc, context)) &&
+	if ((context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || resource_subvp_in_use(dc, context)) &&
 			dc->dml.soc.num_chans <= 8) {
 		int num_mclk_levels = dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels;
 
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index 0dfcb3cdcd20..bc71a9b058fe 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1882,6 +1882,42 @@ static void dcn20_program_pipe(
 	}
 }
 
+static void update_vmin_vmax_fams(struct dc *dc,
+		struct dc_state *context)
+{
+	uint32_t i;
+	struct drr_params params = {0};
+	bool subvp_in_use = resource_subvp_in_use(dc, context);
+
+	for (i = 0; i < dc->res_pool->pipe_count; i++) {
+		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+		if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+				((subvp_in_use && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM &&
+				pipe->stream->allow_freesync) || (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && pipe->stream->fpo_in_use))) {
+			if (!pipe->stream->vrr_active_variable && !pipe->stream->vrr_active_fixed) {
+				struct timing_generator *tg = context->res_ctx.pipe_ctx[i].stream_res.tg;
+
+				/* DRR should be configured already if we're in active variable
+				 * or active fixed, so only program if we're not in this state
+				 */
+				params.vertical_total_min = pipe->stream->timing.v_total;
+				params.vertical_total_max = pipe->stream->timing.v_total;
+				tg->funcs->set_drr(tg, &params);
+			}
+		} else {
+			if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+					!pipe->stream->vrr_active_variable &&
+					!pipe->stream->vrr_active_fixed) {
+				struct timing_generator *tg = context->res_ctx.pipe_ctx[i].stream_res.tg;
+				params.vertical_total_min = 0;
+				params.vertical_total_max = 0;
+				tg->funcs->set_drr(tg, &params);
+			}
+		}
+	}
+}
+
 void dcn20_program_front_end_for_ctx(
 		struct dc *dc,
 		struct dc_state *context)
@@ -1958,6 +1994,7 @@ void dcn20_program_front_end_for_ctx(
 				&& context->res_ctx.pipe_ctx[i].stream)
 			hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
 
+	update_vmin_vmax_fams(dc, context);
 	/* Disconnect mpcc */
 	for (i = 0; i < dc->res_pool->pipe_count; i++)
 
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index c958ef37b78a..1d51fed12e20 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -609,6 +609,9 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
 		struct pipe_ctx *sec_pipe,
 		bool odm);
 
+bool resource_subvp_in_use(struct dc *dc,
+		struct dc_state *context);
+
 /* A test harness interface that modifies dp encoder resources in the given dc
  * state and bypasses the need to revalidate. The interface assumes that the
  * test harness interface is called with pre-validated link config stored in the
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index c4d71e7f18af..ac04a9c9a3d8 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1899,7 +1899,7 @@ int dcn32_populate_dml_pipes_from_context(
 
 static struct dc_cap_funcs cap_funcs = {
 	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
-	.get_subvp_en = dcn32_subvp_in_use,
+	.get_subvp_en = resource_subvp_in_use,
 };
 
 void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 0c87b0fabba7..62611acd4bcb 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -131,9 +131,6 @@ void dcn32_merge_pipes_for_subvp(struct dc *dc,
 bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
 		struct dc_state *context);
 
-bool dcn32_subvp_in_use(struct dc *dc,
-		struct dc_state *context);
-
 bool dcn32_mpo_in_use(struct dc_state *context);
 
 bool dcn32_any_surfaces_rotated(struct dc *dc, struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 74412e5f03fe..e1ab207c46f1 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1574,7 +1574,7 @@ static void dcn321_destroy_resource_pool(struct resource_pool **pool)
 
 static struct dc_cap_funcs cap_funcs = {
 	.get_dcc_compression_cap = dcn20_get_dcc_compression_cap,
-	.get_subvp_en = dcn32_subvp_in_use,
+	.get_subvp_en = resource_subvp_in_use,
 };
 
 static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-- 
2.42.0