On Tue, May 7, 2019 at 2:09 AM Evan Quan <evan.quan@xxxxxxx> wrote:
>
> Update the Vega10 top performance level power state accordingly
> on OD (OverDrive) changes.
>
> Change-Id: Iaadeefb2904222bf5f4d54b39d7179ce53f92ac0
> Signed-off-by: Evan Quan <evan.quan@xxxxxxx>

Series is:
Acked-by: Alex Deucher <alexander.deucher@xxxxxxx>

> ---
>  .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 59 +++++++++++++++++++
>  1 file changed, 59 insertions(+)
>
> diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> index f4b81f50b185..4878938ecf33 100644
> --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
> @@ -5009,6 +5009,63 @@ static bool vega10_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
>  	return true;
>  }
>
> +static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
> +{
> +	struct vega10_hwmgr *data = hwmgr->backend;
> +	struct pp_power_state *ps = hwmgr->request_ps;
> +	struct vega10_power_state *vega10_ps;
> +	struct vega10_single_dpm_table *gfx_dpm_table =
> +		&data->dpm_table.gfx_table;
> +	struct vega10_single_dpm_table *soc_dpm_table =
> +		&data->dpm_table.soc_table;
> +	struct vega10_single_dpm_table *mem_dpm_table =
> +		&data->dpm_table.mem_table;
> +	int max_level;
> +
> +	if (!ps)
> +		return;
> +
> +	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
> +	max_level = vega10_ps->performance_level_count - 1;
> +
> +	if (vega10_ps->performance_levels[max_level].gfx_clock !=
> +	    gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
> +		vega10_ps->performance_levels[max_level].gfx_clock =
> +			gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
> +
> +	if (vega10_ps->performance_levels[max_level].soc_clock !=
> +	    soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
> +		vega10_ps->performance_levels[max_level].soc_clock =
> +			soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
> +
> +	if (vega10_ps->performance_levels[max_level].mem_clock !=
> +	    mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
> +		vega10_ps->performance_levels[max_level].mem_clock =
> +			mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
> +
> +	if (!hwmgr->ps)
> +		return;
> +
> +	ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
> +	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
> +	max_level = vega10_ps->performance_level_count - 1;
> +
> +	if (vega10_ps->performance_levels[max_level].gfx_clock !=
> +	    gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value)
> +		vega10_ps->performance_levels[max_level].gfx_clock =
> +			gfx_dpm_table->dpm_levels[gfx_dpm_table->count - 1].value;
> +
> +	if (vega10_ps->performance_levels[max_level].soc_clock !=
> +	    soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value)
> +		vega10_ps->performance_levels[max_level].soc_clock =
> +			soc_dpm_table->dpm_levels[soc_dpm_table->count - 1].value;
> +
> +	if (vega10_ps->performance_levels[max_level].mem_clock !=
> +	    mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value)
> +		vega10_ps->performance_levels[max_level].mem_clock =
> +			mem_dpm_table->dpm_levels[mem_dpm_table->count - 1].value;
> +}
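
A non-blocking note on the new function above, since the series is already
acked: the "if (a != b) a = b;" checks are redundant for plain scalar
fields (an unconditional assignment behaves the same), and the two
per-power-state blocks are line-for-line identical. If this pattern grows,
a small helper could factor it out. Untested sketch only; the helper name
is made up here, but the fields are the ones this patch already touches:

static void vega10_odn_sync_max_perf_level(struct vega10_power_state *vega10_ps,
		struct vega10_hwmgr *data)
{
	/* Clamp the top performance level of one power state to the
	 * current (post-OD) maximums in the DPM tables.
	 */
	int max_level = vega10_ps->performance_level_count - 1;

	vega10_ps->performance_levels[max_level].gfx_clock =
		data->dpm_table.gfx_table.dpm_levels[data->dpm_table.gfx_table.count - 1].value;
	vega10_ps->performance_levels[max_level].soc_clock =
		data->dpm_table.soc_table.dpm_levels[data->dpm_table.soc_table.count - 1].value;
	vega10_ps->performance_levels[max_level].mem_clock =
		data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 1].value;
}

vega10_odn_update_power_state() would then reduce to the two NULL checks,
the two cast_phw_vega10_power_state() lookups, and two helper calls.
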
> +
>  static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
>  		enum PP_OD_DPM_TABLE_COMMAND type)
>  {
> @@ -5079,6 +5136,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
>  			podn_vdd_dep->entries[podn_vdd_dep->count - 1].vddInd;
>  		}
>  	}
> +	vega10_odn_update_power_state(hwmgr);
>  }
>
>  static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
> @@ -5113,6 +5171,7 @@ static int vega10_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
>  	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
>  		memcpy(&(data->dpm_table), &(data->golden_dpm_table), sizeof(struct vega10_dpm_table));
>  		vega10_odn_initial_default_setting(hwmgr);
> +		vega10_odn_update_power_state(hwmgr);
>  		return 0;
>  	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
>  		vega10_check_dpm_table_updated(hwmgr);
> --
> 2.21.0

_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx