From: Dmytro Laktyushkin <Dmytro.Laktyushkin@xxxxxxx>

SOC needs to be updated to the WM set A values before validation happens.

Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@xxxxxxx>
Reviewed-by: Eric Bernstein <Eric.Bernstein@xxxxxxx>
Acked-by: Solomon Chiu <solomon.chiu@xxxxxxx>
---
 .../drm/amd/display/dc/dcn30/dcn30_resource.c    | 17 ++++++++++++-----
 .../drm/amd/display/dc/dcn30/dcn30_resource.h    |  1 +
 .../drm/amd/display/dc/dcn301/dcn301_resource.c  |  1 +
 .../drm/amd/display/dc/dcn302/dcn302_resource.c  |  1 +
 drivers/gpu/drm/amd/display/dc/inc/core_types.h  |  2 ++
 5 files changed, 17 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index deab48806fa2..263c2986682d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1876,6 +1876,7 @@ static noinline bool dcn30_internal_validate_bw(
 	if (!pipes)
 		return false;
 
+	dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
 	pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
 
 	DC_FP_START();
@@ -2225,11 +2226,7 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
 	 *
 	 * Set A calculated last so that following calculations are based on Set A
 	 */
-	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
-		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
-		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
-	}
+	dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
 	context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
 	context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
@@ -2272,6 +2269,15 @@ static noinline void dcn30_calculate_wm_and_dlg_fp(
 			dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
 }
 
+void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+{
+	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+	}
+}
+
 void dcn30_calculate_wm_and_dlg(
 		struct dc *dc, struct dc_state *context,
 		display_e2e_pipe_params_st *pipes,
@@ -2496,6 +2502,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
 	.panel_cntl_create = dcn30_panel_cntl_create,
 	.validate_bandwidth = dcn30_validate_bandwidth,
 	.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
 	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index 8ce7f6d39a20..b754b89beadf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -60,6 +60,7 @@ void dcn30_calculate_wm_and_dlg(
 		display_e2e_pipe_params_st *pipes,
 		int pipe_cnt,
 		int vlevel);
+void dcn30_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
 void dcn30_populate_dml_writeback_from_context(
 	struct dc *dc, struct resource_context *res_ctx,
 	display_e2e_pipe_params_st *pipes);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 5f29a4f85ef2..e41747c39e29 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1721,6 +1721,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
 	.panel_cntl_create = dcn301_panel_cntl_create,
 	.validate_bandwidth = dcn30_validate_bandwidth,
 	.calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
+	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
 	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index daa16a41e6b7..0723e29fd42e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -1397,6 +1397,7 @@ static struct resource_funcs dcn302_res_pool_funcs = {
 	.panel_cntl_create = dcn302_panel_cntl_create,
 	.validate_bandwidth = dcn30_validate_bandwidth,
 	.calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+	.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
 	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c5f483287509..eb1a19bf0d81 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -110,6 +110,8 @@ struct resource_funcs {
 			display_e2e_pipe_params_st *pipes,
 			int pipe_cnt,
 			int vlevel);
+	void (*update_soc_for_wm_a)(
+			struct dc *dc, struct dc_state *context);
 	int (*populate_dml_pipes)(
 			struct dc *dc, struct dc_state *context,
-- 
2.29.0

_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx