From: Joshua Aberback <joshua.aberback@xxxxxxx>

[Why]
We want to keep the same buffer allocated for use during multiple
hardware initializations.

[How]
 - allocate gpu memory buffer on clock manager construct
 - free gpu memory buffer on clock manager destruct

Signed-off-by: Joshua Aberback <joshua.aberback@xxxxxxx>
Reviewed-by: Jun Lei <Jun.Lei@xxxxxxx>
Acked-by: Eryk Brol <eryk.brol@xxxxxxx>
---
 .../gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c  |  3 ---
 .../display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c  | 21 +++++++++++--------
 .../amd/display/dc/inc/hw/clk_mgr_internal.h  |  2 ++
 3 files changed, 14 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index f376058b5df6..6a345d43028c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -174,9 +174,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
 	case FAMILY_NV:
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
 		if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev)) {
-			/* TODO: to add SIENNA_CICHLID clk_mgr support, once CLK IP header files are available,
-			 * for now use DCN3AG clk mgr.
-			 */
 			dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
 			break;
 		}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index b27cb52903f5..872ee08b315f 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -344,16 +344,12 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
 static void dcn3_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
 {
 	unsigned int i;
-	long long table_addr;
-	WatermarksExternal_t *table;
 	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+	WatermarksExternal_t *table = (WatermarksExternal_t *) clk_mgr->wm_range_table;
 
 	if (!clk_mgr->smu_present)
 		return;
 
-	/* need physical address of table to give to PMFW */
-	table = (WatermarksExternal_t *) dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t), &table_addr);
-
 	if (!table)
 		// should log failure
 		return;
@@ -371,11 +367,9 @@ static void dcn3_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
 		table->Watermarks.WatermarkRow[WM_DCEFCLK][i].Flags = clk_mgr->base.bw_params->wm_table.nv_entries[i].pmfw_breakdown.wm_type;
 	}
 
-	dcn30_smu_set_dram_addr_high(clk_mgr, table_addr >> 32);
-	dcn30_smu_set_dram_addr_low(clk_mgr, table_addr & 0xFFFFFFFF);
+	dcn30_smu_set_dram_addr_high(clk_mgr, clk_mgr->wm_range_table_addr >> 32);
+	dcn30_smu_set_dram_addr_low(clk_mgr, clk_mgr->wm_range_table_addr & 0xFFFFFFFF);
 	dcn30_smu_transfer_wm_table_dram_2_smu(clk_mgr);
-
-	dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART, table);
 }
 
 /* Set min memclk to minimum, either constrained by the current mode or DPM0 */
@@ -534,10 +528,19 @@ void dcn3_clk_mgr_construct(
 	dce_clock_read_ss_info(clk_mgr);
 
 	clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL);
+
+	/* need physical address of table to give to PMFW */
+	clk_mgr->wm_range_table = dm_helpers_allocate_gpu_mem(clk_mgr->base.ctx,
+			DC_MEM_ALLOC_TYPE_GART, sizeof(WatermarksExternal_t),
+			&clk_mgr->wm_range_table_addr);
 }
 
 void dcn3_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr)
 {
 	if (clk_mgr->base.bw_params)
 		kfree(clk_mgr->base.bw_params);
+
+	if (clk_mgr->wm_range_table)
+		dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
+				clk_mgr->wm_range_table);
 }
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index c3c151be7d03..82212ae2755a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -273,6 +273,8 @@ struct clk_mgr_internal {
 
 #ifdef CONFIG_DRM_AMD_DC_DCN3_0
 	bool smu_present;
+	void *wm_range_table;
+	long long wm_range_table_addr;
#endif
 };
 
-- 
2.25.1
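
For readers following along, the ownership model this patch moves to can be
sketched in plain C. The sketch below is a minimal userspace model, not DC
code: struct clk_mgr_model stands in for just the two fields added to
clk_mgr_internal, and alloc_gart_buffer()/free_gart_buffer() are hypothetical
placeholders for dm_helpers_allocate_gpu_mem()/dm_helpers_free_gpu_mem().

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Reduced model of struct clk_mgr_internal: only the two fields this
 * patch adds. */
struct clk_mgr_model {
	void *wm_range_table;          /* CPU pointer to the table */
	long long wm_range_table_addr; /* address handed to the PMFW */
};

/* Hypothetical stand-in for dm_helpers_allocate_gpu_mem(): returns a
 * CPU pointer and reports the address the firmware would use. */
static void *alloc_gart_buffer(size_t size, long long *addr)
{
	void *p = malloc(size);

	*addr = (long long)(uintptr_t)p; /* real code gets a GART address */
	return p;
}

/* Hypothetical stand-in for dm_helpers_free_gpu_mem(). */
static void free_gart_buffer(void *p)
{
	free(p);
}

/* Construct: allocate once, keep the buffer for the object's lifetime. */
static void construct(struct clk_mgr_model *clk_mgr)
{
	clk_mgr->wm_range_table = alloc_gart_buffer(4096,
			&clk_mgr->wm_range_table_addr);
}

/* Destroy: the single place the buffer is released. */
static void destroy(struct clk_mgr_model *clk_mgr)
{
	if (clk_mgr->wm_range_table)
		free_gart_buffer(clk_mgr->wm_range_table);
}

/* Per-init path: reuses the stored buffer instead of allocating and
 * freeing on every call, mirroring the new dcn3_notify_wm_ranges(). */
static void notify_wm_ranges(struct clk_mgr_model *clk_mgr)
{
	if (!clk_mgr->wm_range_table)
		return; /* should log failure */

	printf("reusing table at 0x%llx\n",
	       (unsigned long long)clk_mgr->wm_range_table_addr);
}

int main(void)
{
	struct clk_mgr_model clk_mgr = { 0 };

	construct(&clk_mgr);
	notify_wm_ranges(&clk_mgr); /* first hardware init */
	notify_wm_ranges(&clk_mgr); /* second init: same buffer */
	destroy(&clk_mgr);
	return 0;
}

The design point is that the per-init path runs with no allocator traffic,
and the buffer has exactly one owner for both setup and teardown.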