From: Eric Huang <JinHuiEric.Huang@xxxxxxx> Signed-off-by: Eric Huang <JinHuiEric.Huang at amd.com> Signed-off-by: Alex Deucher <alexander.deucher at amd.com> --- drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | 1 + drivers/gpu/drm/amd/powerplay/hwmgr/Makefile | 4 +- drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 9 + drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 4378 ++++++++++++++++++++ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h | 434 ++ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h | 44 + .../gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c | 137 + .../gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h | 65 + .../gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h | 331 ++ .../amd/powerplay/hwmgr/vega10_processpptables.c | 1056 +++++ .../amd/powerplay/hwmgr/vega10_processpptables.h | 34 + .../gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | 761 ++++ .../gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h | 83 + drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 3 + drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h | 48 + drivers/gpu/drm/amd/powerplay/inc/smumgr.h | 3 + drivers/gpu/drm/amd/powerplay/smumgr/Makefile | 2 +- drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | 9 + .../gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c | 564 +++ .../gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h | 70 + 20 files changed, 8034 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c create mode 100644 drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h create mode 100644 drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c create mode 100644 drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 96a5113..f5ae871 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -71,6 +71,7 @@ static int amdgpu_pp_early_init(void *handle) case CHIP_TOPAZ: case CHIP_CARRIZO: case CHIP_STONEY: + case CHIP_VEGA10: adev->pp_enabled = true; if (amdgpu_create_pp_handle(adev)) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index ccb51c2..27db2b7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -7,7 +7,9 @@ HARDWARE_MGR = hwmgr.o processpptables.o functiontables.o \ cz_clockpowergating.o pppcielanes.o\ process_pptables_v1_0.o ppatomctrl.o ppatomfwctrl.o \ smu7_hwmgr.o smu7_powertune.o smu7_thermal.o \ - smu7_clockpowergating.o + smu7_clockpowergating.o \ + vega10_processpptables.o vega10_hwmgr.o vega10_powertune.o \ + vega10_thermal.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 2ea9c0e..ff4ae3d 100644 --- 
a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -106,6 +106,15 @@ int hwmgr_early_init(struct pp_instance *handle) } smu7_init_function_pointers(hwmgr); break; + case AMDGPU_FAMILY_AI: + switch (hwmgr->chip_id) { + case CHIP_VEGA10: + vega10_hwmgr_init(hwmgr); + break; + default: + return -EINVAL; + } + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c new file mode 100644 index 0000000..91fa0b5 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -0,0 +1,4378 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> +#include "linux/delay.h" + +#include "hwmgr.h" +#include "amd_powerplay.h" +#include "vega10_smumgr.h" +#include "hardwaremanager.h" +#include "ppatomfwctrl.h" +#include "atomfirmware.h" +#include "cgs_common.h" +#include "vega10_powertune.h" +#include "smu9.h" +#include "smu9_driver_if.h" +#include "vega10_inc.h" +#include "pp_soc15.h" +#include "pppcielanes.h" +#include "vega10_hwmgr.h" +#include "vega10_processpptables.h" +#include "vega10_pptable.h" +#include "vega10_thermal.h" +#include "pp_debug.h" +#include "pp_acpi.h" +#include "amd_pcie_helpers.h" +#include "cgs_linux.h" +#include "ppinterrupt.h" + + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +#define HBM_MEMORY_CHANNEL_WIDTH 128 + +uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2}; + +#define MEM_FREQ_LOW_LATENCY 25000 +#define MEM_FREQ_HIGH_LATENCY 80000 +#define MEM_LATENCY_HIGH 245 +#define MEM_LATENCY_LOW 35 +#define MEM_LATENCY_ERR 0xFFFF + +#define mmDF_CS_AON0_DramBaseAddress0 0x0044 +#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0 + +//DF_CS_AON0_DramBaseAddress0 +#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0 +#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1 +#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4 +#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8 +#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc +#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L +#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L +#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L +#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L +#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L + +const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic); + +struct vega10_power_state *cast_phw_vega10_power_state( + struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL;); + + return (struct vega10_power_state *)hw_ps; +} + +const struct vega10_power_state *cast_const_phw_vega10_power_state( + const struct pp_hw_power_state *hw_ps) +{ + PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic), + "Invalid Powerstate Type!", + return NULL;); + + return (const struct vega10_power_state *)hw_ps; +} + +static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + data->registry_data.sclk_dpm_key_disabled = + hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true; + data->registry_data.socclk_dpm_key_disabled = + hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true; + data->registry_data.mclk_dpm_key_disabled = + hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true; + + data->registry_data.dcefclk_dpm_key_disabled = + hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? 
false : true; + + if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) { + data->registry_data.power_containment_support = 1; + data->registry_data.enable_pkg_pwr_tracking_feature = 1; + data->registry_data.enable_tdc_limit_feature = 1; + } + + data->registry_data.pcie_dpm_key_disabled = 1; + data->registry_data.disable_water_mark = 0; + + data->registry_data.fan_control_support = 1; + data->registry_data.thermal_support = 1; + data->registry_data.fw_ctf_enabled = 1; + + data->registry_data.avfs_support = 1; + data->registry_data.led_dpm_enabled = 1; + + data->registry_data.vr0hot_enabled = 1; + data->registry_data.vr1hot_enabled = 1; + data->registry_data.regulator_hot_gpio_support = 1; + + data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT; + data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT; + + data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT; + data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT; + data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT; + data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT; +} + +static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct cgs_system_info sys_info = {0}; + int result; + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPatchPowerState); + + if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_EnableSMU7ThermalManagement); + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; + result = cgs_query_system_info(hwmgr->device, &sys_info); + + if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + + if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE)) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UnTabledHardwareInterface); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_FanSpeedInTableIsRPM); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODFuzzyFanControlSupport); + + 
phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DynamicPowerManagement); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SMC); + + /* power tune caps */ + /* assume disabled */ + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SQRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DBRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TDRamping); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TCPRamping); + + if (data->registry_data.power_containment_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC); + + if (table_info->tdp_table->usClockStretchAmount && + data->registry_data.clock_stretcher_support) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEDPM); + + return 0; +} + +static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + int i; + + vega10_initialize_power_tune_defaults(hwmgr); + + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + data->smu_features[i].smu_feature_id = 0xffff; + data->smu_features[i].smu_feature_bitmap = 1 << i; + data->smu_features[i].enabled = false; + data->smu_features[i].supported = false; + } + + data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = + FEATURE_DPM_PREFETCHER_BIT; + data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id = + FEATURE_DPM_GFXCLK_BIT; + data->smu_features[GNLD_DPM_UCLK].smu_feature_id = + FEATURE_DPM_UCLK_BIT; + data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id = + FEATURE_DPM_SOCCLK_BIT; + data->smu_features[GNLD_DPM_UVD].smu_feature_id = + FEATURE_DPM_UVD_BIT; + data->smu_features[GNLD_DPM_VCE].smu_feature_id = + FEATURE_DPM_VCE_BIT; + data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id = + FEATURE_DPM_MP0CLK_BIT; + data->smu_features[GNLD_DPM_LINK].smu_feature_id = + FEATURE_DPM_LINK_BIT; + data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id = + FEATURE_DPM_DCEFCLK_BIT; + data->smu_features[GNLD_ULV].smu_feature_id = + FEATURE_ULV_BIT; + data->smu_features[GNLD_AVFS].smu_feature_id = + FEATURE_AVFS_BIT; + data->smu_features[GNLD_DS_GFXCLK].smu_feature_id = + FEATURE_DS_GFXCLK_BIT; + data->smu_features[GNLD_DS_SOCCLK].smu_feature_id = + FEATURE_DS_SOCCLK_BIT; + data->smu_features[GNLD_DS_LCLK].smu_feature_id = + FEATURE_DS_LCLK_BIT; + data->smu_features[GNLD_PPT].smu_feature_id = + FEATURE_PPT_BIT; + data->smu_features[GNLD_TDC].smu_feature_id = + FEATURE_TDC_BIT; + data->smu_features[GNLD_THERMAL].smu_feature_id = + FEATURE_THERMAL_BIT; + data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id = + FEATURE_GFX_PER_CU_CG_BIT; + data->smu_features[GNLD_RM].smu_feature_id = + FEATURE_RM_BIT; + data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id = + FEATURE_DS_DCEFCLK_BIT; + data->smu_features[GNLD_ACDC].smu_feature_id = + FEATURE_ACDC_BIT; + data->smu_features[GNLD_VR0HOT].smu_feature_id 
= + FEATURE_VR0HOT_BIT; + data->smu_features[GNLD_VR1HOT].smu_feature_id = + FEATURE_VR1HOT_BIT; + data->smu_features[GNLD_FW_CTF].smu_feature_id = + FEATURE_FW_CTF_BIT; + data->smu_features[GNLD_LED_DISPLAY].smu_feature_id = + FEATURE_LED_DISPLAY_BIT; + data->smu_features[GNLD_FAN_CONTROL].smu_feature_id = + FEATURE_FAN_CONTROL_BIT; + data->smu_features[GNLD_VOLTAGE_CONTROLLER].smu_feature_id = + FEATURE_VOLTAGE_CONTROLLER_BIT; + + if (!data->registry_data.prefetcher_dpm_key_disabled) + data->smu_features[GNLD_DPM_PREFETCHER].supported = true; + + if (!data->registry_data.sclk_dpm_key_disabled) + data->smu_features[GNLD_DPM_GFXCLK].supported = true; + + if (!data->registry_data.mclk_dpm_key_disabled) + data->smu_features[GNLD_DPM_UCLK].supported = true; + + if (!data->registry_data.socclk_dpm_key_disabled) + data->smu_features[GNLD_DPM_SOCCLK].supported = true; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDDPM)) + data->smu_features[GNLD_DPM_UVD].supported = true; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEDPM)) + data->smu_features[GNLD_DPM_VCE].supported = true; + + if (!data->registry_data.pcie_dpm_key_disabled) + data->smu_features[GNLD_DPM_LINK].supported = true; + + if (!data->registry_data.dcefclk_dpm_key_disabled) + data->smu_features[GNLD_DPM_DCEFCLK].supported = true; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep) && + data->registry_data.sclk_deep_sleep_support) { + data->smu_features[GNLD_DS_GFXCLK].supported = true; + data->smu_features[GNLD_DS_SOCCLK].supported = true; + data->smu_features[GNLD_DS_LCLK].supported = true; + } + + if (data->registry_data.enable_pkg_pwr_tracking_feature) + data->smu_features[GNLD_PPT].supported = true; + + if (data->registry_data.enable_tdc_limit_feature) + data->smu_features[GNLD_TDC].supported = true; + + if (data->registry_data.thermal_support) + data->smu_features[GNLD_THERMAL].supported = true; + + if (data->registry_data.fan_control_support) + data->smu_features[GNLD_FAN_CONTROL].supported = true; + + if (data->registry_data.fw_ctf_enabled) + data->smu_features[GNLD_FW_CTF].supported = true; + + if (data->registry_data.avfs_support) + data->smu_features[GNLD_AVFS].supported = true; + + if (data->registry_data.led_dpm_enabled) + data->smu_features[GNLD_LED_DISPLAY].supported = true; + + if (data->registry_data.vr1hot_enabled) + data->smu_features[GNLD_VR1HOT].supported = true; + + if (data->registry_data.vr0hot_enabled) + data->smu_features[GNLD_VR0HOT].supported = true; + +} + +#ifdef PPLIB_VEGA10_EVV_SUPPORT +static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + uint16_t virtual_voltage_id, int32_t *socclk) +{ + uint8_t entry_id; + uint8_t voltage_id; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + PP_ASSERT_WITH_CODE(lookup_table->count != 0, + "Lookup table is empty", + return -EINVAL); + + /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */ + for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) { + voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd; + if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id) + break; + } + + PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count, + "Can't find requested voltage id in vdd_dep_on_socclk table!", + return -EINVAL); + + *socclk = 
table_info->vdd_dep_on_socclk->entries[entry_id].clk; + + return 0; +} + +#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01 +/** +* Get Leakage VDDC based on leakage ID. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0. +*/ +static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + uint16_t vv_id; + uint32_t vddc = 0; + uint16_t i, j; + uint32_t sclk = 0; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table = + table_info->vdd_dep_on_socclk; + int result; + + for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) { + vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i; + + if (!vega10_get_socclk_for_voltage_evv(hwmgr, + table_info->vddc_lookup_table, vv_id, &sclk)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + for (j = 1; j < socclk_table->count; j++) { + if (socclk_table->entries[j].clk == sclk && + socclk_table->entries[j].cks_enable == 0) { + sclk += 5000; + break; + } + } + } + + PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, + VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc), + "Error retrieving EVV voltage value!", + continue); + + + /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */ + PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0), + "Invalid VDDC value", result = -EINVAL;); + + /* the voltage should not be zero nor equal to leakage ID */ + if (vddc != 0 && vddc != vv_id) { + data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100); + data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id; + data->vddc_leakage.count++; + } + } + } + + return 0; +} + +/** + * Change virtual leakage voltage to actual value. + * + * @param hwmgr the address of the powerplay hardware manager. + * @param pointer to changing voltage + * @param pointer to leakage table + */ +static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr, + uint16_t *voltage, struct vega10_leakage_voltage *leakage_table) +{ + uint32_t index; + + /* search for leakage voltage ID 0xff01 ~ 0xff08 */ + for (index = 0; index < leakage_table->count; index++) { + /* if this voltage matches a leakage voltage ID */ + /* patch with actual leakage voltage */ + if (leakage_table->leakage_id[index] == *voltage) { + *voltage = leakage_table->actual_voltage[index]; + break; + } + } + + if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0) + pr_info("Voltage value looks like a Leakage ID \ + but it's not patched\n"); +} + +/** +* Patch voltage lookup table by EVV leakages. +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pointer to voltage lookup table +* @param pointer to leakage table +* @return always 0 +*/ +static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table *lookup_table, + struct vega10_leakage_voltage *leakage_table) +{ + uint32_t i; + + for (i = 0; i < lookup_table->count; i++) + vega10_patch_with_vdd_leakage(hwmgr, + &lookup_table->entries[i].us_vdd, leakage_table); + + return 0; +} + +static int vega10_patch_clock_voltage_limits_with_vddc_leakage( + struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table, + uint16_t *vddc) +{ + vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table); + + return 0; +} +#endif + +static int vega10_patch_voltage_dependency_tables_with_lookup_table( + struct pp_hwmgr *hwmgr) +{ + uint8_t entry_id; + uint8_t voltage_id; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table = + table_info->vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table = + table_info->vdd_dep_on_dcefclk; + struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table = + table_info->vdd_dep_on_pixclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table = + table_info->vdd_dep_on_dispclk; + struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table = + table_info->vdd_dep_on_phyclk; + struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = + table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = + table_info->mm_dep_table; + + for (entry_id = 0; entry_id < socclk_table->count; entry_id++) { + voltage_id = socclk_table->entries[entry_id].vddInd; + socclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) { + voltage_id = gfxclk_table->entries[entry_id].vddInd; + gfxclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) { + voltage_id = dcefclk_table->entries[entry_id].vddInd; + dcefclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) { + voltage_id = pixclk_table->entries[entry_id].vddInd; + pixclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) { + voltage_id = dspclk_table->entries[entry_id].vddInd; + dspclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) { + voltage_id = phyclk_table->entries[entry_id].vddInd; + phyclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) { + voltage_id = mclk_table->entries[entry_id].vddInd; + mclk_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + voltage_id = mclk_table->entries[entry_id].vddciInd; + mclk_table->entries[entry_id].vddci = + table_info->vddci_lookup_table->entries[voltage_id].us_vdd; + voltage_id = mclk_table->entries[entry_id].mvddInd; + 
mclk_table->entries[entry_id].mvdd = + table_info->vddmem_lookup_table->entries[voltage_id].us_vdd; + } + + for (entry_id = 0; entry_id < mm_table->count; ++entry_id) { + voltage_id = mm_table->entries[entry_id].vddcInd; + mm_table->entries[entry_id].vddc = + table_info->vddc_lookup_table->entries[voltage_id].us_vdd; + } + + return 0; + +} + +static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_voltage_lookup_table *lookup_table) +{ + uint32_t table_size, i, j; + struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; + + PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count, + "Lookup table is empty", return -EINVAL); + + table_size = lookup_table->count; + + /* Sorting voltages */ + for (i = 0; i < table_size - 1; i++) { + for (j = i + 1; j > 0; j--) { + if (lookup_table->entries[j].us_vdd < + lookup_table->entries[j - 1].us_vdd) { + tmp_voltage_lookup_record = lookup_table->entries[j - 1]; + lookup_table->entries[j - 1] = lookup_table->entries[j]; + lookup_table->entries[j] = tmp_voltage_lookup_record; + } + } + } + + return 0; +} + +static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr) +{ + int result = 0; + int tmp_result; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); +#ifdef PPLIB_VEGA10_EVV_SUPPORT + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr, + table_info->vddc_lookup_table, &(data->vddc_leakage)); + if (tmp_result) + result = tmp_result; + + tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr, + &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc); + if (tmp_result) + result = tmp_result; +#endif + + tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr); + if (tmp_result) + result = tmp_result; + + tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table); + if (tmp_result) + result = tmp_result; + + return result; +} + +static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table = + table_info->vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table = + table_info->vdd_dep_on_mclk; + + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table, + "VDD dependency on SCLK table is missing. \ + This table is mandatory", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1, + "VDD dependency on SCLK table is empty. \ + This table is mandatory", return -EINVAL); + + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table, + "VDD dependency on MCLK table is missing. \ + This table is mandatory", return -EINVAL); + PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1, + "VDD dependency on MCLK table is empty. 
\ + This table is mandatory", return -EINVAL); + + table_info->max_clock_voltage_on_ac.sclk = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk; + table_info->max_clock_voltage_on_ac.mclk = + allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk; + table_info->max_clock_voltage_on_ac.vddc = + allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc; + table_info->max_clock_voltage_on_ac.vddci = + allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci; + + hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = + table_info->max_clock_voltage_on_ac.sclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = + table_info->max_clock_voltage_on_ac.mclk; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = + table_info->max_clock_voltage_on_ac.vddc; + hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = + table_info->max_clock_voltage_on_ac.vddci; + + return 0; +} + +static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) +{ + kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); + hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; + + kfree(hwmgr->backend); + hwmgr->backend = NULL; + + return 0; +} + +static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct vega10_hwmgr *data; + uint32_t config_telemetry = 0; + struct pp_atomfwctrl_voltage_table vol_table; + struct cgs_system_info sys_info = {0}; + + data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + + vega10_set_default_registry_data(hwmgr); + + data->disable_dpm_mask = 0xff; + data->workload_mask = 0xff; + + /* need to set voltage control types before EVV patching */ + data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE; + data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE; + data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE; + + /* VDDCR_SOC */ + if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { + if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr, + VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2, + &vol_table)) { + config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) | + (vol_table.telemetry_offset & 0xff); + data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2; + } + } else { + kfree(hwmgr->backend); + hwmgr->backend = NULL; + PP_ASSERT_WITH_CODE(false, + "VDDCR_SOC is not SVID2!", + return -1); + } + + /* MVDDC */ + if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) { + if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr, + VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2, + &vol_table)) { + config_telemetry |= + ((vol_table.telemetry_slope << 24) & 0xff000000) | + ((vol_table.telemetry_offset << 16) & 0xff0000); + data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2; + } + } + + /* VDDCI_MEM */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ControlVDDCI)) { + if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr, + VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT)) + data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO; + } + + data->config_telemetry = config_telemetry; + + vega10_set_features_platform_caps(hwmgr); + + vega10_init_dpm_defaults(hwmgr); + +#ifdef PPLIB_VEGA10_EVV_SUPPORT + /* Get leakage voltage based on leakage ID. */ + PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr), + "Get EVV Voltage Failed. 
Abort Driver loading!", + return -1); +#endif + + /* Patch our voltage dependency table with actual leakage voltage + * We need to perform leakage translation before it's used by other functions + */ + vega10_complete_dependency_tables(hwmgr); + + /* Parse pptable data read from VBIOS */ + vega10_set_private_data_based_on_pptable(hwmgr); + + data->is_tlu_enabled = false; + + hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = + VEGA10_MAX_HARDWARE_POWERLEVELS; + hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; + hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50; + + hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */ + /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */ + hwmgr->platform_descriptor.clockStep.engineClock = 500; + hwmgr->platform_descriptor.clockStep.memoryClock = 500; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + data->total_active_cus = sys_info.value; + /* Setup default Overdrive Fan control settings */ + data->odn_fan_table.target_fan_speed = + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM; + data->odn_fan_table.target_temperature = + hwmgr->thermal_controller. + advanceFanControlParameters.ucTargetTemperature; + data->odn_fan_table.min_performance_clock = + hwmgr->thermal_controller.advanceFanControlParameters. + ulMinFanSCLKAcousticLimit; + data->odn_fan_table.min_fan_limit = + hwmgr->thermal_controller. + advanceFanControlParameters.usFanPWMMinLimit * + hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; + + return result; +} + +static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + data->low_sclk_interrupt_threshold = 0; + + return 0; +} + +static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + + struct pp_atomfwctrl_voltage_table table; + uint8_t i, j; + uint32_t mask = 0; + uint32_t tmp; + int32_t ret = 0; + + ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM, + VOLTAGE_OBJ_GPIO_LUT, &table); + + if (!ret) { + tmp = table.mask_low; + for (i = 0, j = 0; i < 32; i++) { + if (tmp & 1) { + mask |= (uint32_t)(i << (8 * j)); + if (++j >= 3) + break; + } + tmp >>= 1; + } + } + + pp_table->LedPin0 = (uint8_t)(mask & 0xff); + pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff); + pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff); + return ret; +} + +static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr) +{ + PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr), + "Failed to init sclk threshold!", + return -EINVAL); + + PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr), + "Failed to set up led dpm config!", + return -EINVAL); + + return 0; +} + +static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr) +{ + uint32_t features_enabled; + + if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) { + if (features_enabled & SMC_DPM_FEATURES) + return true; + } + return false; +} + +/** +* Remove repeated voltage values and create table with unique values. +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @param vol_table the pointer to changing voltage table +* @return 0 in success +*/ + +static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + uint32_t i, j; + uint16_t vvalue; + bool found = false; + struct pp_atomfwctrl_voltage_table *table; + + PP_ASSERT_WITH_CODE(vol_table, + "Voltage Table empty.", return -EINVAL); + table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table), + GFP_KERNEL); + + if (!table) + return -ENOMEM; + + table->mask_low = vol_table->mask_low; + table->phase_delay = vol_table->phase_delay; + + for (i = 0; i < vol_table->count; i++) { + vvalue = vol_table->entries[i].value; + found = false; + + for (j = 0; j < table->count; j++) { + if (vvalue == table->entries[j].value) { + found = true; + break; + } + } + + if (!found) { + table->entries[table->count].value = vvalue; + table->entries[table->count].smio_low = + vol_table->entries[i].smio_low; + table->count++; + } + } + + memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table)); + kfree(table); + + return 0; +} + +static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *dep_table, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + int i; + + PP_ASSERT_WITH_CODE(dep_table->count, + "Voltage Dependency Table empty.", + return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < vol_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].mvdd; + vol_table->entries[i].smio_low = 0; + } + + PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, + vol_table), + "Failed to trim MVDD Table!", + return -1); + + return 0; +} + +static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *dep_table, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + uint32_t i; + + PP_ASSERT_WITH_CODE(dep_table->count, + "Voltage Dependency Table empty.", + return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < dep_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].vddci; + vol_table->entries[i].smio_low = 0; + } + + PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table), + "Failed to trim VDDCI table.", + return -1); + + return 0; +} + +static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table *dep_table, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + int i; + + PP_ASSERT_WITH_CODE(dep_table->count, + "Voltage Dependency Table empty.", + return -EINVAL); + + vol_table->mask_low = 0; + vol_table->phase_delay = 0; + vol_table->count = dep_table->count; + + for (i = 0; i < vol_table->count; i++) { + vol_table->entries[i].value = dep_table->entries[i].vddc; + vol_table->entries[i].smio_low = 0; + } + + return 0; +} + +/* ---- Voltage Tables ---- + * If the voltage table would be bigger than + * what will fit into the state table on + * the SMC keep only the higher entries. 
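+ * (the lowest entries are dropped so that at most max_vol_steps of the
+ * highest voltage values remain).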
+ */ +static void vega10_trim_voltage_table_to_fit_state_table( + struct pp_hwmgr *hwmgr, + uint32_t max_vol_steps, + struct pp_atomfwctrl_voltage_table *vol_table) +{ + unsigned int i, diff; + + if (vol_table->count <= max_vol_steps) + return; + + diff = vol_table->count - max_vol_steps; + + for (i = 0; i < max_vol_steps; i++) + vol_table->entries[i] = vol_table->entries[i + diff]; + + vol_table->count = max_vol_steps; +} + +/** +* Create Voltage Tables. +* +* @param hwmgr the address of the powerplay hardware manager. +* @return always 0 +*/ +static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + int result; + + if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 || + data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) { + result = vega10_get_mvdd_voltage_table(hwmgr, + table_info->vdd_dep_on_mclk, + &(data->mvdd_voltage_table)); + PP_ASSERT_WITH_CODE(!result, + "Failed to retrieve MVDDC table!", + return result); + } + + if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) { + result = vega10_get_vddci_voltage_table(hwmgr, + table_info->vdd_dep_on_mclk, + &(data->vddci_voltage_table)); + PP_ASSERT_WITH_CODE(!result, + "Failed to retrieve VDDCI_MEM table!", + return result); + } + + if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 || + data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) { + result = vega10_get_vdd_voltage_table(hwmgr, + table_info->vdd_dep_on_sclk, + &(data->vddc_voltage_table)); + PP_ASSERT_WITH_CODE(!result, + "Failed to retrieve VDDCR_SOC table!", + return result); + } + + PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16, + "Too many voltage values for VDDC. Trimming to fit state table.", + vega10_trim_voltage_table_to_fit_state_table(hwmgr, + 16, &(data->vddc_voltage_table))); + + PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16, + "Too many voltage values for VDDCI. Trimming to fit state table.", + vega10_trim_voltage_table_to_fit_state_table(hwmgr, + 16, &(data->vddci_voltage_table))); + + PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16, + "Too many voltage values for MVDD. Trimming to fit state table.", + vega10_trim_voltage_table_to_fit_state_table(hwmgr, + 16, &(data->mvdd_voltage_table))); + + + return 0; +} + +/* + * @fn vega10_init_dpm_state + * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff. + * + * @param dpm_state - the address of the DPM Table to initiailize. + * @return None. 
+ */ +static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state) +{ + dpm_state->soft_min_level = 0xff; + dpm_state->soft_max_level = 0xff; + dpm_state->hard_min_level = 0xff; + dpm_state->hard_max_level = 0xff; +} + +static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr, + struct vega10_single_dpm_table *dpm_table, + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table) +{ + int i; + + for (i = 0; i < dep_table->count; i++) { + if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value != + dep_table->entries[i].clk) { + dpm_table->dpm_levels[dpm_table->count].value = + dep_table->entries[i].clk; + dpm_table->dpm_levels[dpm_table->count].enabled = true; + dpm_table->count++; + } + } +} +static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_pcie_table *bios_pcie_table = + table_info->pcie_table; + uint32_t i; + + PP_ASSERT_WITH_CODE(bios_pcie_table->count, + "Incorrect number of PCIE States from VBIOS!", + return -1); + + for (i = 0; i < NUM_LINK_LEVELS - 1; i++) { + if (data->registry_data.pcieSpeedOverride) + pcie_table->pcie_gen[i] = + data->registry_data.pcieSpeedOverride; + else + pcie_table->pcie_gen[i] = + bios_pcie_table->entries[i].gen_speed; + + if (data->registry_data.pcieLaneOverride) + pcie_table->pcie_lane[i] = + data->registry_data.pcieLaneOverride; + else + pcie_table->pcie_lane[i] = + bios_pcie_table->entries[i].lane_width; + + if (data->registry_data.pcieClockOverride) + pcie_table->lclk[i] = + data->registry_data.pcieClockOverride; + else + pcie_table->lclk[i] = + bios_pcie_table->entries[i].pcie_sclk; + + pcie_table->count++; + } + + if (data->registry_data.pcieSpeedOverride) + pcie_table->pcie_gen[i] = data->registry_data.pcieSpeedOverride; + else + pcie_table->pcie_gen[i] = + bios_pcie_table->entries[bios_pcie_table->count - 1].gen_speed; + + if (data->registry_data.pcieLaneOverride) + pcie_table->pcie_lane[i] = data->registry_data.pcieLaneOverride; + else + pcie_table->pcie_lane[i] = + bios_pcie_table->entries[bios_pcie_table->count - 1].lane_width; + + if (data->registry_data.pcieClockOverride) + pcie_table->lclk[i] = data->registry_data.pcieClockOverride; + else + pcie_table->lclk[i] = + bios_pcie_table->entries[bios_pcie_table->count - 1].pcie_sclk; + + pcie_table->count++; + + return 0; +} + +/* + * This function is to initialize all DPM state tables + * for SMU based on the dependency table. + * Dynamic state patching function will then trim these + * state tables to the allowed range based + * on the power policy or external client requests, + * such as UVD request, etc. 
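+ * Besides the SOCCLK/GFXCLK/UCLK and multimedia clock tables, this also
+ * builds the display-clock and PCIe link tables and keeps a golden copy
+ * of the defaults.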
+ */ +static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct vega10_single_dpm_table *dpm_table; + uint32_t i; + + struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table = + table_info->vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table = + table_info->vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table = + table_info->vdd_dep_on_mclk; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table = + table_info->mm_dep_table; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table = + table_info->vdd_dep_on_dcefclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table = + table_info->vdd_dep_on_pixclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table = + table_info->vdd_dep_on_dispclk; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table = + table_info->vdd_dep_on_phyclk; + + PP_ASSERT_WITH_CODE(dep_soc_table, + "SOCCLK dependency table is missing. This table is mandatory", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1, + "SOCCLK dependency table is empty. This table is mandatory", + return -EINVAL); + + PP_ASSERT_WITH_CODE(dep_gfx_table, + "GFXCLK dependency table is missing. This table is mandatory", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1, + "GFXCLK dependency table is empty. This table is mandatory", + return -EINVAL); + + PP_ASSERT_WITH_CODE(dep_mclk_table, + "MCLK dependency table is missing. This table is mandatory", + return -EINVAL); + PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1, + "MCLK dependency table has to have is missing. This table is mandatory", + return -EINVAL); + + /* Initialize Sclk DPM table based on allow Sclk values */ + data->dpm_table.soc_table.count = 0; + data->dpm_table.gfx_table.count = 0; + data->dpm_table.dcef_table.count = 0; + + dpm_table = &(data->dpm_table.soc_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_soc_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.gfx_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_gfx_table); + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + /* Initialize Mclk DPM table based on allow Mclk values */ + data->dpm_table.mem_table.count = 0; + dpm_table = &(data->dpm_table.mem_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_mclk_table); + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + data->dpm_table.eclk_table.count = 0; + dpm_table = &(data->dpm_table.eclk_table); + for (i = 0; i < dep_mm_table->count; i++) { + if (i == 0 || dpm_table->dpm_levels + [dpm_table->count - 1].value != + dep_mm_table->entries[i].eclk) { + dpm_table->dpm_levels[dpm_table->count].value = + dep_mm_table->entries[i].eclk; + dpm_table->dpm_levels[dpm_table->count].enabled = + (i == 0) ? 
true : false; + dpm_table->count++; + } + } + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + data->dpm_table.vclk_table.count = 0; + data->dpm_table.dclk_table.count = 0; + dpm_table = &(data->dpm_table.vclk_table); + for (i = 0; i < dep_mm_table->count; i++) { + if (i == 0 || dpm_table->dpm_levels + [dpm_table->count - 1].value != + dep_mm_table->entries[i].vclk) { + dpm_table->dpm_levels[dpm_table->count].value = + dep_mm_table->entries[i].vclk; + dpm_table->dpm_levels[dpm_table->count].enabled = + (i == 0) ? true : false; + dpm_table->count++; + } + } + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.dclk_table); + for (i = 0; i < dep_mm_table->count; i++) { + if (i == 0 || dpm_table->dpm_levels + [dpm_table->count - 1].value != + dep_mm_table->entries[i].dclk) { + dpm_table->dpm_levels[dpm_table->count].value = + dep_mm_table->entries[i].dclk; + dpm_table->dpm_levels[dpm_table->count].enabled = + (i == 0) ? true : false; + dpm_table->count++; + } + } + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + /* Assume there is no headless Vega10 for now */ + dpm_table = &(data->dpm_table.dcef_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_dcef_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.pixel_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_pix_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.display_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_disp_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + dpm_table = &(data->dpm_table.phy_table); + vega10_setup_default_single_dpm_table(hwmgr, + dpm_table, + dep_phy_table); + + vega10_init_dpm_state(&(dpm_table->dpm_state)); + + vega10_setup_default_pcie_table(hwmgr); + + /* save a copy of the default DPM table */ + memcpy(&(data->golden_dpm_table), &(data->dpm_table), + sizeof(struct vega10_dpm_table)); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinDCSupport)) { + data->odn_dpm_table.odn_core_clock_dpm_levels. + number_of_performance_levels = data->dpm_table.gfx_table.count; + for (i = 0; i < data->dpm_table.gfx_table.count; i++) { + data->odn_dpm_table.odn_core_clock_dpm_levels. + performance_level_entries[i].clock = + data->dpm_table.gfx_table.dpm_levels[i].value; + data->odn_dpm_table.odn_core_clock_dpm_levels. + performance_level_entries[i].enabled = true; + } + + data->odn_dpm_table.vdd_dependency_on_sclk.count = + dep_gfx_table->count; + for (i = 0; i < dep_gfx_table->count; i++) { + data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk = + dep_gfx_table->entries[i].clk; + data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd = + dep_gfx_table->entries[i].vddInd; + data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable = + dep_gfx_table->entries[i].cks_enable; + data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset = + dep_gfx_table->entries[i].cks_voffset; + } + + data->odn_dpm_table.odn_memory_clock_dpm_levels. + number_of_performance_levels = data->dpm_table.mem_table.count; + for (i = 0; i < data->dpm_table.mem_table.count; i++) { + data->odn_dpm_table.odn_memory_clock_dpm_levels. + performance_level_entries[i].clock = + data->dpm_table.mem_table.dpm_levels[i].value; + data->odn_dpm_table.odn_memory_clock_dpm_levels. 
+				performance_level_entries[i].enabled = true;
+		}
+
+		data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
+		for (i = 0; i < dep_mclk_table->count; i++) {
+			data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
+					dep_mclk_table->entries[i].clk;
+			data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
+					dep_mclk_table->entries[i].vddInd;
+			data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
+					dep_mclk_table->entries[i].vddci;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * @fn vega10_populate_ulv_state
+ * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
+ *
+ * @param hwmgr - the address of the hardware manager.
+ * @return Always 0.
+ */
+static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v2_information *table_info =
+			(struct phm_ppt_v2_information *)(hwmgr->pptable);
+
+	data->smc_state_table.pp_table.UlvOffsetVid =
+			(uint8_t)(table_info->us_ulv_voltage_offset *
+			VOLTAGE_VID_OFFSET_SCALE2 /
+			VOLTAGE_VID_OFFSET_SCALE1);
+
+	data->smc_state_table.pp_table.UlvSmnclkDid =
+			(uint8_t)(table_info->us_ulv_smnclk_did);
+	data->smc_state_table.pp_table.UlvMp1clkDid =
+			(uint8_t)(table_info->us_ulv_mp1clk_did);
+	data->smc_state_table.pp_table.UlvGfxclkBypass =
+			(uint8_t)(table_info->us_ulv_gfxclk_bypass);
+	data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
+			(uint8_t)(data->vddc_voltage_table.psi0_enable);
+	data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
+			(uint8_t)(data->vddc_voltage_table.psi1_enable);
+
+	return 0;
+}
+
+static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
+		uint32_t lclock, uint8_t *curr_lclk_did)
+{
+	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
+
+	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
+			hwmgr,
+			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+			lclock, &dividers),
+			"Failed to get LCLK clock settings from VBIOS!",
+			return -1);
+
+	*curr_lclk_did = dividers.ulDid;
+
+	return 0;
+}
+
+static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
+{
+	int result = -1;
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+	struct vega10_pcie_table *pcie_table =
+			&(data->dpm_table.pcie_table);
+	uint32_t i, j;
+
+	for (i = 0; i < pcie_table->count; i++) {
+		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
+		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
+
+		result = vega10_populate_single_lclk_level(hwmgr,
+				pcie_table->lclk[i], &(pp_table->LclkDid[i]));
+		if (result) {
+			pr_info("Populate LClock Level %d Failed!\n", i);
+			return result;
+		}
+	}
+
+	j = i - 1;
+	while (i < NUM_LINK_LEVELS) {
+		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
+		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
+
+		result = vega10_populate_single_lclk_level(hwmgr,
+				pcie_table->lclk[j], &(pp_table->LclkDid[i]));
+		if (result) {
+			pr_info("Populate LClock Level %d Failed!\n", i);
+			return result;
+		}
+		i++;
+	}
+
+	return result;
+}
+
+/**
+* Populates single SMC GFXCLK structure using the provided engine clock
+*
+* @param hwmgr the address of the hardware manager
+* @param gfx_clock the GFX clock to use to populate the structure.
+* @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
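+* @return 0 on success; -EINVAL if the clock cannot be found in the
+* dependency table or the VBIOS divider query fails.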
+*/
+
+static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
+		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level)
+{
+	struct phm_ppt_v2_information *table_info =
+			(struct phm_ppt_v2_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
+			table_info->vdd_dep_on_sclk;
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
+	uint32_t i;
+
+	if (data->apply_overdrive_next_settings_mask &
+			DPMTABLE_OD_UPDATE_VDDC)
+		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
+				&(data->odn_dpm_table.vdd_dependency_on_sclk);
+
+	PP_ASSERT_WITH_CODE(dep_on_sclk,
+			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
+			return -EINVAL);
+
+	for (i = 0; i < dep_on_sclk->count; i++) {
+		if (dep_on_sclk->entries[i].clk == gfx_clock)
+			break;
+	}
+
+	PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
+			"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
+			COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
+			gfx_clock, &dividers),
+			"Failed to get GFX Clock settings from VBIOS!",
+			return -EINVAL);
+
+	/* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
+	current_gfxclk_level->FbMult =
+			cpu_to_le32(dividers.ulPll_fb_mult);
+	/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
+	current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
+	current_gfxclk_level->SsFbMult =
+			cpu_to_le32(dividers.ulPll_ss_fbsmult);
+	current_gfxclk_level->SsSlewFrac =
+			cpu_to_le16(dividers.usPll_ss_slew_frac);
+	current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
+
+	return 0;
+}
+
+/**
+ * @brief Populates single SMC SOCCLK structure using the provided clock.
+ *
+ * @param hwmgr - the address of the hardware manager.
+ * @param soc_clock - the SOC clock to use to populate the structure.
+ * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
+ * @return 0 on success.
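+ *
+ * Note: the selected level is returned through the current_soc_did (divider
+ * ID) and current_vol_index (SOC voltage index) output parameters.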
+ */
+static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
+		uint32_t soc_clock, uint8_t *current_soc_did,
+		uint8_t *current_vol_index)
+{
+	struct phm_ppt_v2_information *table_info =
+			(struct phm_ppt_v2_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
+			table_info->vdd_dep_on_socclk;
+	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
+	uint32_t i;
+
+	PP_ASSERT_WITH_CODE(dep_on_soc,
+			"Invalid SOC_VDD-SOC_CLK Dependency Table!",
+			return -EINVAL);
+	for (i = 0; i < dep_on_soc->count; i++) {
+		if (dep_on_soc->entries[i].clk == soc_clock)
+			break;
+	}
+	PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
+			"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
+			return -EINVAL);
+	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
+			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+			soc_clock, &dividers),
+			"Failed to get SOC Clock settings from VBIOS!",
+			return -EINVAL);
+
+	*current_soc_did = (uint8_t)dividers.ulDid;
+	*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
+
+	return 0;
+}
+
+uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
+		uint32_t clk,
+		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
+{
+	uint16_t i;
+
+	for (i = 0; i < dep_table->count; i++) {
+		if (dep_table->entries[i].clk == clk)
+			return dep_table->entries[i].vddc;
+	}
+
+	pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
+	return 0;
+}
+
+/**
+* Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+*
+* @param hwmgr the address of the hardware manager
+*/
+static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v2_information *table_info =
+			(struct phm_ppt_v2_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
+			table_info->vdd_dep_on_socclk;
+	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
+	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
+	int result = 0;
+	uint32_t i, j;
+
+	for (i = 0; i < dpm_table->count; i++) {
+		result = vega10_populate_single_gfx_level(hwmgr,
+				dpm_table->dpm_levels[i].value,
+				&(pp_table->GfxclkLevel[i]));
+		if (result)
+			return result;
+	}
+
+	j = i - 1;
+	while (i < NUM_GFXCLK_DPM_LEVELS) {
+		result = vega10_populate_single_gfx_level(hwmgr,
+				dpm_table->dpm_levels[j].value,
+				&(pp_table->GfxclkLevel[i]));
+		if (result)
+			return result;
+		i++;
+	}
+
+	pp_table->GfxclkSlewRate =
+			cpu_to_le16(table_info->us_gfxclk_slew_rate);
+
+	dpm_table = &(data->dpm_table.soc_table);
+	for (i = 0; i < dpm_table->count; i++) {
+		pp_table->SocVid[i] =
+				(uint8_t)convert_to_vid(
+				vega10_locate_vddc_given_clock(hwmgr,
+						dpm_table->dpm_levels[i].value,
+						dep_table));
+		result = vega10_populate_single_soc_level(hwmgr,
+				dpm_table->dpm_levels[i].value,
+				&(pp_table->SocclkDid[i]),
+				&(pp_table->SocDpmVoltageIndex[i]));
+		if (result)
+			return result;
+	}
+
+	j = i - 1;
+	while (i < NUM_SOCCLK_DPM_LEVELS) {
+		pp_table->SocVid[i] = pp_table->SocVid[j];
+		result = vega10_populate_single_soc_level(hwmgr,
+				dpm_table->dpm_levels[j].value,
+				&(pp_table->SocclkDid[i]),
+				&(pp_table->SocDpmVoltageIndex[i]));
+		if (result)
+			return result;
+		i++;
+	}
+
+	return result;
+}
+
+/**
+ * @brief Populates a single SMC memory level structure using the provided memory clock.
+ *
+ * @param hwmgr - the address of the hardware manager.
+ * @param mem_clock - the memory clock to use to populate the structure.
+ * @param current_mem_vid - voltage ID (VID) derived from the MVDD of the memory level.
+ * @param current_memclk_level - location in PPTable for the SMC UCLK structure.
+ * @param current_mem_soc_vind - index of the SOC voltage level for the memory clock.
+ * @return 0 on success.
+ */
+static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
+		uint32_t mem_clock, uint8_t *current_mem_vid,
+		PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
+{
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+	struct phm_ppt_v2_information *table_info =
+			(struct phm_ppt_v2_information *)(hwmgr->pptable);
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
+			table_info->vdd_dep_on_mclk;
+	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
+	uint32_t i;
+
+	if (data->apply_overdrive_next_settings_mask &
+			DPMTABLE_OD_UPDATE_VDDC)
+		dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
+				&data->odn_dpm_table.vdd_dependency_on_mclk;
+
+	PP_ASSERT_WITH_CODE(dep_on_mclk,
+			"Invalid SOC_VDD-UCLK Dependency Table!",
+			return -EINVAL);
+
+	for (i = 0; i < dep_on_mclk->count; i++) {
+		if (dep_on_mclk->entries[i].clk == mem_clock)
+			break;
+	}
+
+	PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
+			"Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
+			return -EINVAL);
+
+	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
+			hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
+			"Failed to get UCLK settings from VBIOS!",
+			return -EINVAL);
+
+	*current_mem_vid =
+			(uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
+	*current_mem_soc_vind =
+			(uint8_t)(dep_on_mclk->entries[i].vddInd);
+	current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
+	current_memclk_level->Did = (uint8_t)(dividers.ulDid);
+
+	PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
+			"Invalid Divider ID!",
+			return -EINVAL);
+
+	return 0;
+}
+
+/**
+ * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
+ *
+ * @param hwmgr - the address of the hardware manager.
+ * @return 0 on success.
+ */ +static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct vega10_single_dpm_table *dpm_table = + &(data->dpm_table.mem_table); + int result = 0; + uint32_t i, j, reg, mem_channels; + + for (i = 0; i < dpm_table->count; i++) { + result = vega10_populate_single_memory_level(hwmgr, + dpm_table->dpm_levels[i].value, + &(pp_table->MemVid[i]), + &(pp_table->UclkLevel[i]), + &(pp_table->MemSocVoltageIndex[i])); + if (result) + return result; + } + + j = i - 1; + while (i < NUM_UCLK_DPM_LEVELS) { + result = vega10_populate_single_memory_level(hwmgr, + dpm_table->dpm_levels[j].value, + &(pp_table->MemVid[i]), + &(pp_table->UclkLevel[i]), + &(pp_table->MemSocVoltageIndex[i])); + if (result) + return result; + i++; + } + + reg = soc15_get_register_offset(DF_HWID, 0, + mmDF_CS_AON0_DramBaseAddress0_BASE_IDX, + mmDF_CS_AON0_DramBaseAddress0); + mem_channels = (cgs_read_register(hwmgr->device, reg) & + DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >> + DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; + pp_table->NumMemoryChannels = cpu_to_le16(mem_channels); + pp_table->MemoryChannelWidth = + cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH * + channel_number[mem_channels]); + + pp_table->LowestUclkReservedForUlv = + (uint8_t)(data->lowest_uclk_reserved_for_ulv); + + return result; +} + +static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr, + DSPCLK_e disp_clock) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *) + (hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; + uint32_t i; + uint16_t clk = 0, vddc = 0; + uint8_t vid = 0; + + switch (disp_clock) { + case DSPCLK_DCEFCLK: + dep_table = table_info->vdd_dep_on_dcefclk; + break; + case DSPCLK_DISPCLK: + dep_table = table_info->vdd_dep_on_dispclk; + break; + case DSPCLK_PIXCLK: + dep_table = table_info->vdd_dep_on_pixclk; + break; + case DSPCLK_PHYCLK: + dep_table = table_info->vdd_dep_on_phyclk; + break; + default: + return -1; + } + + PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS, + "Number Of Entries Exceeded maximum!", + return -1); + + for (i = 0; i < dep_table->count; i++) { + clk = (uint16_t)(dep_table->entries[i].clk / 100); + vddc = table_info->vddc_lookup_table-> + entries[dep_table->entries[i].vddInd].us_vdd; + vid = (uint8_t)convert_to_vid(vddc); + pp_table->DisplayClockTable[disp_clock][i].Freq = + cpu_to_le16(clk); + pp_table->DisplayClockTable[disp_clock][i].Vid = + cpu_to_le16(vid); + } + + while (i < NUM_DSPCLK_LEVELS) { + pp_table->DisplayClockTable[disp_clock][i].Freq = + cpu_to_le16(clk); + pp_table->DisplayClockTable[disp_clock][i].Vid = + cpu_to_le16(vid); + i++; + } + + return 0; +} + +static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr) +{ + uint32_t i; + + for (i = 0; i < DSPCLK_COUNT; i++) { + PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i), + "Failed to populate Clock in DisplayClockTable!", + return -1); + } + + return 0; +} + +static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr, + uint32_t eclock, uint8_t *current_eclk_did, + uint8_t *current_soc_vol) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct 
phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table = + table_info->mm_dep_table; + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + uint32_t i; + + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + eclock, ÷rs), + "Failed to get ECLK clock settings from VBIOS!", + return -1); + + *current_eclk_did = (uint8_t)dividers.ulDid; + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].eclk == eclock) + *current_soc_vol = dep_table->entries[i].vddcInd; + } + + return 0; +} + +static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table); + int result = -EINVAL; + uint32_t i, j; + + for (i = 0; i < dpm_table->count; i++) { + result = vega10_populate_single_eclock_level(hwmgr, + dpm_table->dpm_levels[i].value, + &(pp_table->EclkDid[i]), + &(pp_table->VceDpmVoltageIndex[i])); + if (result) + return result; + } + + j = i - 1; + while (i < NUM_VCE_DPM_LEVELS) { + result = vega10_populate_single_eclock_level(hwmgr, + dpm_table->dpm_levels[j].value, + &(pp_table->EclkDid[i]), + &(pp_table->VceDpmVoltageIndex[i])); + if (result) + return result; + i++; + } + + return result; +} + +static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr, + uint32_t vclock, uint8_t *current_vclk_did) +{ + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + vclock, ÷rs), + "Failed to get VCLK clock settings from VBIOS!", + return -EINVAL); + + *current_vclk_did = (uint8_t)dividers.ulDid; + + return 0; +} + +static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr, + uint32_t dclock, uint8_t *current_dclk_did) +{ + struct pp_atomfwctrl_clock_dividers_soc15 dividers; + + PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr, + COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, + dclock, ÷rs), + "Failed to get DCLK clock settings from VBIOS!", + return -EINVAL); + + *current_dclk_did = (uint8_t)dividers.ulDid; + + return 0; +} + +static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct vega10_single_dpm_table *vclk_dpm_table = + &(data->dpm_table.vclk_table); + struct vega10_single_dpm_table *dclk_dpm_table = + &(data->dpm_table.dclk_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table = + table_info->mm_dep_table; + int result = -EINVAL; + uint32_t i, j; + + for (i = 0; i < vclk_dpm_table->count; i++) { + result = vega10_populate_single_vclock_level(hwmgr, + vclk_dpm_table->dpm_levels[i].value, + &(pp_table->VclkDid[i])); + if (result) + return result; + } + + j = i - 1; + while (i < NUM_UVD_DPM_LEVELS) { + result = vega10_populate_single_vclock_level(hwmgr, + vclk_dpm_table->dpm_levels[j].value, + &(pp_table->VclkDid[i])); + if (result) + return result; + i++; + } + + for (i = 0; i < dclk_dpm_table->count; i++) { + result = vega10_populate_single_dclock_level(hwmgr, + dclk_dpm_table->dpm_levels[i].value, + &(pp_table->DclkDid[i])); + if (result) + return result; + } + + j = i - 1; + while (i < 
NUM_UVD_DPM_LEVELS) { + result = vega10_populate_single_dclock_level(hwmgr, + dclk_dpm_table->dpm_levels[j].value, + &(pp_table->DclkDid[i])); + if (result) + return result; + i++; + } + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].vclk == + vclk_dpm_table->dpm_levels[i].value && + dep_table->entries[i].dclk == + dclk_dpm_table->dpm_levels[i].value) + pp_table->UvdDpmVoltageIndex[i] = + dep_table->entries[i].vddcInd; + else + return -1; + } + + j = i - 1; + while (i < NUM_UVD_DPM_LEVELS) { + pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd; + i++; + } + + return 0; +} + +static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_sclk; + uint32_t i; + + for (i = 0; dep_table->count; i++) { + pp_table->CksEnable[i] = dep_table->entries[i].cks_enable; + pp_table->CksVidOffset[i] = convert_to_vid( + dep_table->entries[i].cks_voffset); + } + + return 0; +} + +static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_sclk; + struct pp_atomfwctrl_avfs_parameters avfs_params = {0}; + int result = 0; + uint32_t i; + + pp_table->MinVoltageVid = (uint8_t)0xff; + pp_table->MaxVoltageVid = (uint8_t)0; + + if (data->smu_features[GNLD_AVFS].supported) { + result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params); + if (!result) { + pp_table->MinVoltageVid = (uint8_t) + convert_to_vid((uint16_t)(avfs_params.ulMaxVddc)); + pp_table->MaxVoltageVid = (uint8_t) + convert_to_vid((uint16_t)(avfs_params.ulMinVddc)); + pp_table->BtcGbVdroopTableCksOn.a0 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0); + pp_table->BtcGbVdroopTableCksOn.a1 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1); + pp_table->BtcGbVdroopTableCksOn.a2 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2); + + pp_table->BtcGbVdroopTableCksOff.a0 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0); + pp_table->BtcGbVdroopTableCksOff.a1 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1); + pp_table->BtcGbVdroopTableCksOff.a2 = + cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2); + + pp_table->AvfsGbCksOn.m1 = + cpu_to_le32(avfs_params.ulGbFuseTableCksonM1); + pp_table->AvfsGbCksOn.m2 = + cpu_to_le16(avfs_params.usGbFuseTableCksonM2); + pp_table->AvfsGbCksOn.b = + cpu_to_le32(avfs_params.ulGbFuseTableCksonB); + pp_table->AvfsGbCksOn.m1_shift = 24; + pp_table->AvfsGbCksOn.m2_shift = 12; + + pp_table->AvfsGbCksOff.m1 = + cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1); + pp_table->AvfsGbCksOff.m2 = + cpu_to_le16(avfs_params.usGbFuseTableCksoffM2); + pp_table->AvfsGbCksOff.b = + cpu_to_le32(avfs_params.ulGbFuseTableCksoffB); + pp_table->AvfsGbCksOff.m1_shift = 24; + pp_table->AvfsGbCksOff.m2_shift = 12; + + pp_table->AConstant[0] = + cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0); + pp_table->AConstant[1] = + cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1); + pp_table->AConstant[2] = + cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2); + 
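
The per-clock population helpers above (GFXCLK, SOCCLK, UCLK, ECLK, VCLK/DCLK) all follow the same pattern: fill the SMC table with the trimmed DPM levels, then pad the remaining fixed-size slots by repeating the highest real level. The snippet below is only a minimal standalone sketch of that pattern; the table size, struct names and helper are invented for illustration and are not driver code.

#include <stdint.h>
#include <stdio.h>

#define NUM_SMC_LEVELS 8	/* stand-in for the fixed SMC table size */

struct dpm_level { uint32_t value; };

static int populate_one_level(uint32_t clock, uint32_t *out)
{
	*out = clock;		/* stand-in for the per-level VBIOS/divider lookup */
	return 0;
}

static int populate_all_levels(const struct dpm_level *levels,
			       uint32_t count, uint32_t *smc_table)
{
	uint32_t i, j;
	int ret;

	for (i = 0; i < count; i++) {
		ret = populate_one_level(levels[i].value, &smc_table[i]);
		if (ret)
			return ret;
	}

	/* Pad the unused SMC slots with the highest populated level. */
	j = i ? i - 1 : 0;
	while (i < NUM_SMC_LEVELS) {
		ret = populate_one_level(levels[j].value, &smc_table[i]);
		if (ret)
			return ret;
		i++;
	}
	return 0;
}

int main(void)
{
	struct dpm_level levels[] = { {852}, {991}, {1138}, {1269} };
	uint32_t smc_table[NUM_SMC_LEVELS];
	uint32_t i;

	populate_all_levels(levels, 4, smc_table);
	for (i = 0; i < NUM_SMC_LEVELS; i++)
		printf("level %u: %u MHz\n", i, smc_table[i]);
	return 0;
}
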
pp_table->DC_tol_sigma = + cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma); + pp_table->Platform_mean = + cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean); + pp_table->PSM_Age_CompFactor = + cpu_to_le16(avfs_params.usPsmAgeComfactor); + pp_table->Platform_sigma = + cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma); + + for (i = 0; i < dep_table->count; i++) + pp_table->StaticVoltageOffsetVid[i] = (uint8_t) + (dep_table->entries[i].sclk_offset * + VOLTAGE_VID_OFFSET_SCALE2 / + VOLTAGE_VID_OFFSET_SCALE1); + + pp_table->OverrideBtcGbCksOn = + avfs_params.ucEnableGbVdroopTableCkson; + pp_table->OverrideAvfsGbCksOn = + avfs_params.ucEnableGbFuseTableCkson; + + if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->disp_clk_quad_eqn_a) && + (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->disp_clk_quad_eqn_b)) { + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 = + (int32_t)data->disp_clk_quad_eqn_a; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 = + (int16_t)data->disp_clk_quad_eqn_b; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b = + (int32_t)data->disp_clk_quad_eqn_c; + } else { + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 = + (int32_t)avfs_params.ulDispclk2GfxclkM1; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 = + (int16_t)avfs_params.usDispclk2GfxclkM2; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b = + (int32_t)avfs_params.ulDispclk2GfxclkB; + } + + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24; + pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12; + + if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->dcef_clk_quad_eqn_a) && + (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->dcef_clk_quad_eqn_b)) { + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 = + (int32_t)data->dcef_clk_quad_eqn_a; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 = + (int16_t)data->dcef_clk_quad_eqn_b; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b = + (int32_t)data->dcef_clk_quad_eqn_c; + } else { + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 = + (int32_t)avfs_params.ulDcefclk2GfxclkM1; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 = + (int16_t)avfs_params.usDcefclk2GfxclkM2; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b = + (int32_t)avfs_params.ulDcefclk2GfxclkB; + } + + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24; + pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12; + + if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->pixel_clk_quad_eqn_a) && + (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->pixel_clk_quad_eqn_b)) { + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 = + (int32_t)data->pixel_clk_quad_eqn_a; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 = + (int16_t)data->pixel_clk_quad_eqn_b; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b = + (int32_t)data->pixel_clk_quad_eqn_c; + } else { + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 = + (int32_t)avfs_params.ulPixelclk2GfxclkM1; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 = + (int16_t)avfs_params.usPixelclk2GfxclkM2; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b = + (int32_t)avfs_params.ulPixelclk2GfxclkB; + } + + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24; + pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12; + + if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->phy_clk_quad_eqn_a) && + (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != + data->phy_clk_quad_eqn_b)) { + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 = + (int32_t)data->phy_clk_quad_eqn_a; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 = + 
(int16_t)data->phy_clk_quad_eqn_b; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b = + (int32_t)data->phy_clk_quad_eqn_c; + } else { + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 = + (int32_t)avfs_params.ulPhyclk2GfxclkM1; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 = + (int16_t)avfs_params.usPhyclk2GfxclkM2; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b = + (int32_t)avfs_params.ulPhyclk2GfxclkB; + } + + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24; + pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12; + } else { + data->smu_features[GNLD_AVFS].supported = false; + } + } + + return 0; +} + +static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct pp_atomfwctrl_gpio_parameters gpio_params = {0}; + int result; + + result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params); + if (!result) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot) && + (data->registry_data.regulator_hot_gpio_support)) { + pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio; + pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity; + pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio; + pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity; + } else { + pp_table->VR0HotGpio = 0; + pp_table->VR0HotPolarity = 0; + pp_table->VR1HotGpio = 0; + pp_table->VR1HotPolarity = 0; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_AutomaticDCTransition) && + (data->registry_data.ac_dc_switch_gpio_support)) { + pp_table->AcDcGpio = gpio_params.ucAcDcGpio; + pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity; + } else { + pp_table->AcDcGpio = 0; + pp_table->AcDcPolarity = 0; + } + } + + return result; +} + +static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_AVFS].supported) { + if (enable) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_AVFS].smu_feature_bitmap), + "[avfs_control] Attempt to Enable AVFS feature Failed!", + return -1); + data->smu_features[GNLD_AVFS].enabled = true; + } else { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + false, + data->smu_features[GNLD_AVFS].smu_feature_id), + "[avfs_control] Attempt to Disable AVFS feature Failed!", + return -1); + data->smu_features[GNLD_AVFS].enabled = false; + } + } + + return 0; +} + +/** +* Initializes the SMC table and uploads it +* +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data (PowerState) +* @return always 0 +*/ +static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) +{ + int result; + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + struct pp_atomfwctrl_voltage_table voltage_table; + + result = vega10_setup_default_dpm_tables(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to setup default DPM tables!", + return result); + + pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_SVID2, &voltage_table); + pp_table->MaxVidStep = voltage_table.max_vid_step; + + pp_table->GfxDpmVoltageMode = + (uint8_t)(table_info->uc_gfx_dpm_voltage_mode); + pp_table->SocDpmVoltageMode = + (uint8_t)(table_info->uc_soc_dpm_voltage_mode); + pp_table->UclkDpmVoltageMode = + (uint8_t)(table_info->uc_uclk_dpm_voltage_mode); + pp_table->UvdDpmVoltageMode = + (uint8_t)(table_info->uc_uvd_dpm_voltage_mode); + pp_table->VceDpmVoltageMode = + (uint8_t)(table_info->uc_vce_dpm_voltage_mode); + pp_table->Mp0DpmVoltageMode = + (uint8_t)(table_info->uc_mp0_dpm_voltage_mode); + pp_table->DisplayDpmVoltageMode = + (uint8_t)(table_info->uc_dcef_dpm_voltage_mode); + + if (data->registry_data.ulv_support && + table_info->us_ulv_voltage_offset) { + result = vega10_populate_ulv_state(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize ULV state!", + return result); + } + + result = vega10_populate_smc_link_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Link Level!", + return result); + + result = vega10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Graphics Level!", + return result); + + result = vega10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Memory Level!", + return result); + + result = vega10_populate_all_display_clock_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize Display Level!", + return result); + + result = vega10_populate_smc_vce_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize VCE Level!", + return result); + + result = vega10_populate_smc_uvd_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize UVD Level!", + return result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher)) { + result = vega10_populate_clock_stretcher_table(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate Clock Stretcher Table!", + return result); + } + + result = vega10_populate_avfs_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize AVFS Parameters!", + return result); + + result = vega10_populate_gpio_parameters(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to initialize GPIO Parameters!", + return result); + + pp_table->GfxclkAverageAlpha = (uint8_t) + (data->gfxclk_average_alpha); + pp_table->SocclkAverageAlpha = (uint8_t) + (data->socclk_average_alpha); + pp_table->UclkAverageAlpha = (uint8_t) + (data->uclk_average_alpha); + pp_table->GfxActivityAverageAlpha = (uint8_t) + (data->gfx_activity_average_alpha); + + result = vega10_copy_table_to_smc(hwmgr->smumgr, + (uint8_t *)pp_table, PPTABLE); + PP_ASSERT_WITH_CODE(!result, + "Failed to upload PPtable!", return result); + + if (data->smu_features[GNLD_AVFS].supported) { + uint32_t features_enabled; + result = vega10_get_smc_features(hwmgr->smumgr, 
&features_enabled); + PP_ASSERT_WITH_CODE(!result, + "Failed to Retrieve Enabled Features!", + return result); + if (!(features_enabled & (1 << FEATURE_AVFS_BIT))) { + result = vega10_perform_btc(hwmgr->smumgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to Perform BTC!", + return result); + result = vega10_avfs_enable(hwmgr, true); + PP_ASSERT_WITH_CODE(!result, + "Attempt to enable AVFS feature Failed!", + return result); + result = vega10_save_vft_table(hwmgr->smumgr, + (uint8_t *)&(data->smc_state_table.avfs_table)); + PP_ASSERT_WITH_CODE(!result, + "Attempt to save VFT table Failed!", + return result); + } else { + data->smu_features[GNLD_AVFS].enabled = true; + result = vega10_restore_vft_table(hwmgr->smumgr, + (uint8_t *)&(data->smc_state_table.avfs_table)); + PP_ASSERT_WITH_CODE(!result, + "Attempt to restore VFT table Failed!", + return result;); + } + } + + return 0; +} + +static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_THERMAL].supported) { + if (data->smu_features[GNLD_THERMAL].enabled) + pr_info("THERMAL Feature Already enabled!"); + + PP_ASSERT_WITH_CODE( + !vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_THERMAL].smu_feature_bitmap), + "Enable THERMAL Feature Failed!", + return -1); + data->smu_features[GNLD_THERMAL].enabled = true; + } + + return 0; +} + +static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_RegulatorHot)) { + if (data->smu_features[GNLD_VR0HOT].supported) { + PP_ASSERT_WITH_CODE( + !vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_VR0HOT].smu_feature_bitmap), + "Attempt to Enable VR0 Hot feature Failed!", + return -1); + data->smu_features[GNLD_VR0HOT].enabled = true; + } else { + if (data->smu_features[GNLD_VR1HOT].supported) { + PP_ASSERT_WITH_CODE( + !vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_VR1HOT].smu_feature_bitmap), + "Attempt to Enable VR0 Hot feature Failed!", + return -1); + data->smu_features[GNLD_VR1HOT].enabled = true; + } + } + } + return 0; +} + +static int vega10_enable_ulv(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->registry_data.ulv_support) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_ULV].smu_feature_bitmap), + "Enable ULV Feature Failed!", + return -1); + data->smu_features[GNLD_ULV].enabled = true; + } + + return 0; +} + +static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DS_GFXCLK].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap), + "Attempt to Enable DS_GFXCLK Feature Failed!", + return -1); + data->smu_features[GNLD_DS_GFXCLK].enabled = true; + } + + if (data->smu_features[GNLD_DS_SOCCLK].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap), + "Attempt to Enable DS_GFXCLK Feature Failed!", + return -1); + data->smu_features[GNLD_DS_SOCCLK].enabled = true; + } + + if (data->smu_features[GNLD_DS_LCLK].supported) { + 
+		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+				true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
+				"Attempt to Enable DS_LCLK Feature Failed!",
+				return -EINVAL);
+		data->smu_features[GNLD_DS_LCLK].enabled = true;
+	}
+
+	return 0;
+}
+
+/**
+ * @brief Tell SMC to enable the supported DPMs.
+ *
+ * @param hwmgr - the address of the powerplay hardware manager.
+ * @param bitmap - bitmap of the features to be enabled.
+ * @return 0 if at least one DPM is successfully enabled.
+ */
+static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
+{
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+	uint32_t i, feature_mask = 0;
+
+	for (i = 0; i < GNLD_DPM_MAX; i++) {
+		if (data->smu_features[i].smu_feature_bitmap & bitmap) {
+			if (data->smu_features[i].supported) {
+				if (!data->smu_features[i].enabled) {
+					feature_mask |= data->smu_features[i].
+							smu_feature_bitmap;
+					data->smu_features[i].enabled = true;
+				}
+			}
+		}
+	}
+
+	if (vega10_enable_smc_features(hwmgr->smumgr,
+			true, feature_mask)) {
+		for (i = 0; i < GNLD_DPM_MAX; i++) {
+			if (data->smu_features[i].smu_feature_bitmap &
+					feature_mask)
+				data->smu_features[i].enabled = false;
+		}
+	}
+
+	if (data->smu_features[GNLD_LED_DISPLAY].supported) {
+		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+				true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
+				"Attempt to Enable LED DPM feature Failed!", return -EINVAL);
+		data->smu_features[GNLD_LED_DISPLAY].enabled = true;
+	}
+
+	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
+			PHM_PlatformCaps_Falcon_QuickTransition)) {
+		if (data->smu_features[GNLD_ACDC].supported) {
+			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
+					true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
+					"Attempt to Enable ACDC Feature Failed!",
+					return -EINVAL);
+			data->smu_features[GNLD_ACDC].enabled = true;
+		}
+	}
+
+	return 0;
+}
+
+static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
+{
+	struct vega10_hwmgr *data =
+			(struct vega10_hwmgr *)(hwmgr->backend);
+	int tmp_result, result = 0;
+
+	tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+			PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
+	PP_ASSERT_WITH_CODE(!tmp_result,
+			"Failed to configure telemetry!",
+			return tmp_result);
+
+	vega10_set_tools_address(hwmgr->smumgr);
+
+	tmp_result = (!vega10_is_dpm_running(hwmgr)) ?
0 : -1; + PP_ASSERT_WITH_CODE(!tmp_result, + "DPM is already running right , skipping re-enablement!", + return 0); + + tmp_result = vega10_construct_voltage_tables(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to contruct voltage tables!", + result = tmp_result); + + tmp_result = vega10_init_smc_table(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to initialize SMC table!", + result = tmp_result); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) { + tmp_result = vega10_enable_thermal_protection(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable thermal protection!", + result = tmp_result); + } + + tmp_result = vega10_enable_vrhot_feature(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable VR hot feature!", + result = tmp_result); + + tmp_result = vega10_enable_ulv(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable ULV!", + result = tmp_result); + + tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable deep sleep master switch!", + result = tmp_result); + + tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to start DPM!", result = tmp_result); + + tmp_result = vega10_enable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to enable power containment!", + result = tmp_result); + + tmp_result = vega10_power_control_set_level(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to power control set level!", + result = tmp_result); + + return result; +} + +static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr) +{ + return sizeof(struct vega10_power_state); +} + +static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, + void *state, struct pp_power_state *power_state, + void *pp_table, uint32_t classification_flag) +{ + struct vega10_power_state *vega10_power_state = + cast_phw_vega10_power_state(&(power_state->hardware)); + struct vega10_performance_level *performance_level; + ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state; + ATOM_Vega10_POWERPLAYTABLE *powerplay_table = + (ATOM_Vega10_POWERPLAYTABLE *)pp_table; + ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table = + (ATOM_Vega10_SOCCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset)); + ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table = + (ATOM_Vega10_GFXCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset)); + ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table = + (ATOM_Vega10_MCLK_Dependency_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); + + + /* The following fields are not initialized here: + * id orderedList allStatesList + */ + power_state->classification.ui_label = + (le16_to_cpu(state_entry->usClassification) & + ATOM_PPLIB_CLASSIFICATION_UI_MASK) >> + ATOM_PPLIB_CLASSIFICATION_UI_SHIFT; + power_state->classification.flags = classification_flag; + /* NOTE: There is a classification2 flag in BIOS + * that is not being used right now + */ + power_state->classification.temporary_state = false; + power_state->classification.to_be_deleted = false; + + power_state->validation.disallowOnDC = + ((le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Vega10_DISALLOW_ON_DC) != 0); + + power_state->display.disableFrameModulation = false; + 
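
The power-state parsing above converts little-endian words from the VBIOS table to CPU order and then applies a mask/shift to pull out the UI classification label, plus bit tests on the caps dword. Here is a tiny standalone sketch of that decode step; the mask, shift and cap values are placeholders for the ATOM definitions, not copies of them, and the little-endian helper assumes a little-endian host.

#include <stdint.h>
#include <stdio.h>

#define UI_LABEL_MASK		0x0007u		/* placeholder for the ATOM UI mask */
#define UI_LABEL_SHIFT		0		/* placeholder for the ATOM UI shift */
#define CAP_DISALLOW_ON_DC	(1u << 3)	/* placeholder cap bit */

static uint16_t le16_to_cpu_sketch(uint16_t v)
{
	return v;	/* sketch only: assumes a little-endian host */
}

int main(void)
{
	uint16_t classification = 0x0005;	/* example raw table value */
	uint32_t caps = CAP_DISALLOW_ON_DC;	/* example caps dword */

	unsigned ui_label = (le16_to_cpu_sketch(classification) &
			     UI_LABEL_MASK) >> UI_LABEL_SHIFT;
	int disallow_on_dc = (caps & CAP_DISALLOW_ON_DC) != 0;

	printf("ui_label=%u disallow_on_dc=%d\n", ui_label, disallow_on_dc);
	return 0;
}
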
power_state->display.limitRefreshrate = false; + power_state->display.enableVariBright = + ((le32_to_cpu(state_entry->ulCapsAndSettings) & + ATOM_Vega10_ENABLE_VARIBRIGHT) != 0); + + power_state->validation.supportedPowerLevels = 0; + power_state->uvd_clocks.VCLK = 0; + power_state->uvd_clocks.DCLK = 0; + power_state->temperatures.min = 0; + power_state->temperatures.max = 0; + + performance_level = &(vega10_power_state->performance_levels + [vega10_power_state->performance_level_count++]); + + PP_ASSERT_WITH_CODE( + (vega10_power_state->performance_level_count < + NUM_GFXCLK_DPM_LEVELS), + "Performance levels exceeds SMC limit!", + return -1); + + PP_ASSERT_WITH_CODE( + (vega10_power_state->performance_level_count <= + hwmgr->platform_descriptor. + hardwareActivityPerformanceLevels), + "Performance levels exceeds Driver limit!", + return -1); + + /* Performance levels are arranged from low to high. */ + performance_level->soc_clock = socclk_dep_table->entries + [state_entry->ucSocClockIndexLow].ulClk; + performance_level->gfx_clock = gfxclk_dep_table->entries + [state_entry->ucGfxClockIndexLow].ulClk; + performance_level->mem_clock = mclk_dep_table->entries + [state_entry->ucMemClockIndexLow].ulMemClk; + + performance_level = &(vega10_power_state->performance_levels + [vega10_power_state->performance_level_count++]); + + performance_level->soc_clock = socclk_dep_table->entries + [state_entry->ucSocClockIndexHigh].ulClk; + performance_level->gfx_clock = gfxclk_dep_table->entries + [state_entry->ucGfxClockIndexHigh].ulClk; + performance_level->mem_clock = mclk_dep_table->entries + [state_entry->ucMemClockIndexHigh].ulMemClk; + return 0; +} + +static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr, + unsigned long entry_index, struct pp_power_state *state) +{ + int result; + struct vega10_power_state *ps; + + state->hardware.magic = PhwVega10_Magic; + + ps = cast_phw_vega10_power_state(&state->hardware); + + result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state, + vega10_get_pp_table_entry_callback_func); + + /* + * This is the earliest time we have all the dependency table + * and the VBIOS boot state + */ + /* set DC compatible flag if this state supports DC */ + if (!state->validation.disallowOnDC) + ps->dc_compatible = true; + + ps->uvd_clks.vclk = state->uvd_clocks.VCLK; + ps->uvd_clks.dclk = state->uvd_clocks.DCLK; + + return 0; +} + +static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps) +{ + return 0; +} + +static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *request_ps, + const struct pp_power_state *current_ps) +{ + struct vega10_power_state *vega10_ps = + cast_phw_vega10_power_state(&request_ps->hardware); + uint32_t sclk; + uint32_t mclk; + struct PP_Clocks minimum_clocks = {0}; + bool disable_mclk_switching; + bool disable_mclk_switching_for_frame_lock; + bool disable_mclk_switching_for_vr; + bool force_mclk_high; + struct cgs_display_info info = {0}; + const struct phm_clock_and_voltage_limits *max_limits; + uint32_t i; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + int32_t count; + uint32_t stable_pstate_sclk_dpm_percentage; + uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0; + uint32_t latency; + + data->battery_state = (PP_StateUILabel_Battery == + request_ps->classification.ui_label); + + if (vega10_ps->performance_level_count != 2) + pr_info("VI 
should always have 2 performance levels"); + + max_limits = (PP_PowerSource_AC == hwmgr->power_source) ? + &(hwmgr->dyn_state.max_clock_voltage_on_ac) : + &(hwmgr->dyn_state.max_clock_voltage_on_dc); + + /* Cap clock DPM tables at DC MAX if it is in DC. */ + if (PP_PowerSource_DC == hwmgr->power_source) { + for (i = 0; i < vega10_ps->performance_level_count; i++) { + if (vega10_ps->performance_levels[i].mem_clock > + max_limits->mclk) + vega10_ps->performance_levels[i].mem_clock = + max_limits->mclk; + if (vega10_ps->performance_levels[i].gfx_clock > + max_limits->sclk) + vega10_ps->performance_levels[i].gfx_clock = + max_limits->sclk; + } + } + + vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk; + vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk; + + cgs_get_active_displays_info(hwmgr->device, &info); + + /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/ + minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock; + /* minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock; */ + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + PP_ASSERT_WITH_CODE( + data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 && + data->registry_data.stable_pstate_sclk_dpm_percentage <= 100, + "percent sclk value must range from 1% to 100%, setting default value", + stable_pstate_sclk_dpm_percentage = 75); + + max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac); + stable_pstate_sclk = (max_limits->sclk * + stable_pstate_sclk_dpm_percentage) / 100; + + for (count = table_info->vdd_dep_on_sclk->count - 1; + count >= 0; count--) { + if (stable_pstate_sclk >= + table_info->vdd_dep_on_sclk->entries[count].clk) { + stable_pstate_sclk = + table_info->vdd_dep_on_sclk->entries[count].clk; + break; + } + } + + if (count < 0) + stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk; + + stable_pstate_mclk = max_limits->mclk; + + minimum_clocks.engineClock = stable_pstate_sclk; + minimum_clocks.memoryClock = stable_pstate_mclk; + } + + if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk) + minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk; + + if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk) + minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk; + + vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold; + + if (hwmgr->gfx_arbiter.sclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.engineClock), + "Overdrive sclk exceeds limit", + hwmgr->gfx_arbiter.sclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.engineClock); + + if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk) + vega10_ps->performance_levels[1].gfx_clock = + hwmgr->gfx_arbiter.sclk_over_drive; + } + + if (hwmgr->gfx_arbiter.mclk_over_drive) { + PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <= + hwmgr->platform_descriptor.overdriveLimit.memoryClock), + "Overdrive mclk exceeds limit", + hwmgr->gfx_arbiter.mclk_over_drive = + hwmgr->platform_descriptor.overdriveLimit.memoryClock); + + if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk) + vega10_ps->performance_levels[1].mem_clock = + hwmgr->gfx_arbiter.mclk_over_drive; + } + + disable_mclk_switching_for_frame_lock = phm_cap_enabled( + hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_DisableMclkSwitchForVR); 
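
The state-adjust logic that follows makes two decisions: MCLK switching is disabled when more than one display is active or when any of the frame-lock/VR/force-high caps is set, and the requested clocks are clamped into the window between the display minimum and the AC/DC maximum. The sketch below shows only that decision and clamp in isolation; all names and numbers are local to the example, not driver state.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct limits { uint32_t sclk; uint32_t mclk; };

static uint32_t clamp_clock(uint32_t clk, uint32_t min, uint32_t max)
{
	/* Raise to the minimum, but never above the AC/DC maximum. */
	if (clk < min)
		clk = (min > max) ? max : min;
	return clk;
}

int main(void)
{
	int display_count = 2;
	bool disable_for_frame_lock = false, disable_for_vr = false,
	     force_mclk_high = false;
	struct limits max = { .sclk = 1600, .mclk = 945 };
	uint32_t min_engine = 600, min_memory = 500;
	uint32_t sclk = 300, mclk = 167;

	bool disable_mclk_switching = (display_count > 1) ||
			disable_for_frame_lock || disable_for_vr ||
			force_mclk_high;

	sclk = clamp_clock(sclk, min_engine, max.sclk);
	mclk = clamp_clock(mclk, min_memory, max.mclk);

	printf("disable_mclk_switching=%d sclk=%u mclk=%u\n",
	       disable_mclk_switching, sclk, mclk);
	return 0;
}
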
+ force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ForceMclkHigh); + + disable_mclk_switching = (info.display_count > 1) || + disable_mclk_switching_for_frame_lock || + disable_mclk_switching_for_vr || + force_mclk_high; + + sclk = vega10_ps->performance_levels[0].gfx_clock; + mclk = vega10_ps->performance_levels[0].mem_clock; + + if (sclk < minimum_clocks.engineClock) + sclk = (minimum_clocks.engineClock > max_limits->sclk) ? + max_limits->sclk : minimum_clocks.engineClock; + + if (mclk < minimum_clocks.memoryClock) + mclk = (minimum_clocks.memoryClock > max_limits->mclk) ? + max_limits->mclk : minimum_clocks.memoryClock; + + vega10_ps->performance_levels[0].gfx_clock = sclk; + vega10_ps->performance_levels[0].mem_clock = mclk; + + vega10_ps->performance_levels[1].gfx_clock = + (vega10_ps->performance_levels[1].gfx_clock >= + vega10_ps->performance_levels[0].gfx_clock) ? + vega10_ps->performance_levels[1].gfx_clock : + vega10_ps->performance_levels[0].gfx_clock; + + if (disable_mclk_switching) { + /* Set Mclk the max of level 0 and level 1 */ + if (mclk < vega10_ps->performance_levels[1].mem_clock) + mclk = vega10_ps->performance_levels[1].mem_clock; + + /* Find the lowest MCLK frequency that is within + * the tolerable latency defined in DAL + */ + latency = 0; + for (i = 0; i < data->mclk_latency_table.count; i++) { + if ((data->mclk_latency_table.entries[i].latency <= latency) && + (data->mclk_latency_table.entries[i].frequency >= + vega10_ps->performance_levels[0].mem_clock) && + (data->mclk_latency_table.entries[i].frequency <= + vega10_ps->performance_levels[1].mem_clock)) + mclk = data->mclk_latency_table.entries[i].frequency; + } + vega10_ps->performance_levels[0].mem_clock = mclk; + } else { + if (vega10_ps->performance_levels[1].mem_clock < + vega10_ps->performance_levels[0].mem_clock) + vega10_ps->performance_levels[1].mem_clock = + vega10_ps->performance_levels[0].mem_clock; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) { + for (i = 0; i < vega10_ps->performance_level_count; i++) { + vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk; + vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk; + } + } + + return 0; +} + +static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct vega10_power_state *vega10_ps = + cast_const_phw_vega10_power_state(states->pnew_state); + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct vega10_single_dpm_table *sclk_table = + &(data->dpm_table.gfx_table); + uint32_t sclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; + struct vega10_single_dpm_table *mclk_table = + &(data->dpm_table.mem_table); + uint32_t mclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct cgs_display_info info = {0}; + + data->need_update_dpm_table = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinDCSupport)) { + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (!(data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_SCLK) && i >= 
sclk_table->count) { + /* Check SCLK in DAL's minimum clocks + * in case DeepSleep divider update is required. + */ + if (data->display_timing.min_clock_in_sr != + min_clocks.engineClockInSR && + (min_clocks.engineClockInSR >= + VEGA10_MINIMUM_ENGINE_CLOCK || + data->display_timing.min_clock_in_sr >= + VEGA10_MINIMUM_ENGINE_CLOCK)) + data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != + info.display_count) + data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; + } else { + for (i = 0; i < sclk_table->count; i++) { + if (sclk == sclk_table->dpm_levels[i].value) + break; + } + + if (i >= sclk_table->count) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + else { + /* Check SCLK in DAL's minimum clocks + * in case DeepSleep divider update is required. + */ + if (data->display_timing.min_clock_in_sr != + min_clocks.engineClockInSR && + (min_clocks.engineClockInSR >= + VEGA10_MINIMUM_ENGINE_CLOCK || + data->display_timing.min_clock_in_sr >= + VEGA10_MINIMUM_ENGINE_CLOCK)) + data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK; + } + + for (i = 0; i < mclk_table->count; i++) { + if (mclk == mclk_table->dpm_levels[i].value) + break; + } + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (i >= mclk_table->count) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + + if (data->display_timing.num_existing_displays != + info.display_count || + i >= mclk_table->count) + data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK; + } + return 0; +} + +static int vega10_populate_and_upload_sclk_mclk_dpm_levels( + struct pp_hwmgr *hwmgr, const void *input) +{ + int result = 0; + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct vega10_power_state *vega10_ps = + cast_const_phw_vega10_power_state(states->pnew_state); + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t sclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; + uint32_t mclk = vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].mem_clock; + struct vega10_dpm_table *dpm_table = &data->dpm_table; + struct vega10_dpm_table *golden_dpm_table = + &data->golden_dpm_table; + uint32_t dpm_count, clock_percent; + uint32_t i; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ODNinDCSupport)) { + + if (!data->need_update_dpm_table && + !data->apply_optimized_settings && + !data->apply_overdrive_next_settings_mask) + return 0; + + if (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_SCLK) { + for (dpm_count = 0; + dpm_count < dpm_table->gfx_table.count; + dpm_count++) { + dpm_table->gfx_table.dpm_levels[dpm_count].enabled = + data->odn_dpm_table.odn_core_clock_dpm_levels. + performance_level_entries[dpm_count].enabled; + dpm_table->gfx_table.dpm_levels[dpm_count].value = + data->odn_dpm_table.odn_core_clock_dpm_levels. + performance_level_entries[dpm_count].clock; + } + } + + if (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_MCLK) { + for (dpm_count = 0; + dpm_count < dpm_table->mem_table.count; + dpm_count++) { + dpm_table->mem_table.dpm_levels[dpm_count].enabled = + data->odn_dpm_table.odn_memory_clock_dpm_levels. 
+ performance_level_entries[dpm_count].enabled; + dpm_table->mem_table.dpm_levels[dpm_count].value = + data->odn_dpm_table.odn_memory_clock_dpm_levels. + performance_level_entries[dpm_count].clock; + } + } + + if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) || + data->apply_optimized_settings || + (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_SCLK)) { + result = vega10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate SCLK during \ + PopulateNewDPMClocksStates Function!", + return result); + } + + if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) || + (data->apply_overdrive_next_settings_mask & + DPMTABLE_OD_UPDATE_MCLK)){ + result = vega10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate MCLK during \ + PopulateNewDPMClocksStates Function!", + return result); + } + } else { + if (!data->need_update_dpm_table && + !data->apply_optimized_settings) + return 0; + + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK && + data->smu_features[GNLD_DPM_GFXCLK].supported) { + dpm_table-> + gfx_table.dpm_levels[dpm_table->gfx_table.count - 1]. + value = sclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinDCSupport)) { + /* Need to do calculation based on the golden DPM table + * as the Heatmap GPU Clock axis is also based on + * the default values + */ + PP_ASSERT_WITH_CODE( + golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value, + "Divide by 0!", + return -1); + + dpm_count = dpm_table->gfx_table.count < 2 ? + 0 : dpm_table->gfx_table.count - 2; + for (i = dpm_count; i > 1; i--) { + if (sclk > golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value) { + clock_percent = + ((sclk - golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value) * + 100) / + golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value; + + dpm_table->gfx_table.dpm_levels[i].value = + golden_dpm_table->gfx_table.dpm_levels[i].value + + (golden_dpm_table->gfx_table.dpm_levels[i].value * + clock_percent) / 100; + } else if (golden_dpm_table-> + gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value > + sclk) { + clock_percent = + ((golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count - 1].value - + sclk) * 100) / + golden_dpm_table->gfx_table.dpm_levels + [golden_dpm_table->gfx_table.count-1].value; + + dpm_table->gfx_table.dpm_levels[i].value = + golden_dpm_table->gfx_table.dpm_levels[i].value - + (golden_dpm_table->gfx_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->gfx_table.dpm_levels[i].value = + golden_dpm_table->gfx_table.dpm_levels[i].value; + } + } + } + + if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK && + data->smu_features[GNLD_DPM_UCLK].supported) { + dpm_table-> + mem_table.dpm_levels[dpm_table->mem_table.count - 1]. + value = mclk; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinACSupport) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_OD6PlusinDCSupport)) { + + PP_ASSERT_WITH_CODE( + golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count - 1].value, + "Divide by 0!", + return -1); + + dpm_count = dpm_table->mem_table.count < 2 ? 
+ 0 : dpm_table->mem_table.count - 2; + for (i = dpm_count; i > 1; i--) { + if (mclk > golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value) { + clock_percent = ((mclk - + golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value) * + 100) / + golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value; + + dpm_table->mem_table.dpm_levels[i].value = + golden_dpm_table->mem_table.dpm_levels[i].value + + (golden_dpm_table->mem_table.dpm_levels[i].value * + clock_percent) / 100; + } else if (golden_dpm_table->mem_table.dpm_levels + [dpm_table->mem_table.count-1].value > mclk) { + clock_percent = ((golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value - mclk) * + 100) / + golden_dpm_table->mem_table.dpm_levels + [golden_dpm_table->mem_table.count-1].value; + + dpm_table->mem_table.dpm_levels[i].value = + golden_dpm_table->mem_table.dpm_levels[i].value - + (golden_dpm_table->mem_table.dpm_levels[i].value * + clock_percent) / 100; + } else + dpm_table->mem_table.dpm_levels[i].value = + golden_dpm_table->mem_table.dpm_levels[i].value; + } + } + } + + if ((data->need_update_dpm_table & + (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) || + data->apply_optimized_settings) { + result = vega10_populate_all_graphic_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate SCLK during \ + PopulateNewDPMClocksStates Function!", + return result); + } + + if (data->need_update_dpm_table & + (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) { + result = vega10_populate_all_memory_levels(hwmgr); + PP_ASSERT_WITH_CODE(!result, + "Failed to populate MCLK during \ + PopulateNewDPMClocksStates Function!", + return result); + } + } + + return result; +} + +static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr, + struct vega10_single_dpm_table *dpm_table, + uint32_t low_limit, uint32_t high_limit) +{ + uint32_t i; + + for (i = 0; i < dpm_table->count; i++) { + if ((dpm_table->dpm_levels[i].value < low_limit) || + (dpm_table->dpm_levels[i].value > high_limit)) + dpm_table->dpm_levels[i].enabled = false; + else + dpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr, + struct vega10_single_dpm_table *dpm_table, + uint32_t low_limit, uint32_t high_limit, + uint32_t disable_dpm_mask) +{ + uint32_t i; + + for (i = 0; i < dpm_table->count; i++) { + if ((dpm_table->dpm_levels[i].value < low_limit) || + (dpm_table->dpm_levels[i].value > high_limit)) + dpm_table->dpm_levels[i].enabled = false; + else if (!((1 << i) & disable_dpm_mask)) + dpm_table->dpm_levels[i].enabled = false; + else + dpm_table->dpm_levels[i].enabled = true; + } + return 0; +} + +static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr, + const struct vega10_power_state *vega10_ps) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t high_limit_count; + + PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1), + "power state did not have any performance level", + return -1); + + high_limit_count = (vega10_ps->performance_level_count == 1) ? 
0 : 1; + + vega10_trim_single_dpm_states(hwmgr, + &(data->dpm_table.soc_table), + vega10_ps->performance_levels[0].soc_clock, + vega10_ps->performance_levels[high_limit_count].soc_clock); + + vega10_trim_single_dpm_states_with_mask(hwmgr, + &(data->dpm_table.gfx_table), + vega10_ps->performance_levels[0].gfx_clock, + vega10_ps->performance_levels[high_limit_count].gfx_clock, + data->disable_dpm_mask); + + vega10_trim_single_dpm_states(hwmgr, + &(data->dpm_table.mem_table), + vega10_ps->performance_levels[0].mem_clock, + vega10_ps->performance_levels[high_limit_count].mem_clock); + + return 0; +} + +static uint32_t vega10_find_lowest_dpm_level( + struct vega10_single_dpm_table *table) +{ + uint32_t i; + + for (i = 0; i < table->count; i++) { + if (table->dpm_levels[i].enabled) + break; + } + + return i; +} + +static uint32_t vega10_find_highest_dpm_level( + struct vega10_single_dpm_table *table) +{ + uint32_t i = 0; + + if (table->count <= MAX_REGULAR_DPM_NUMBER) { + for (i = table->count; i > 0; i--) { + if (table->dpm_levels[i - 1].enabled) + return i - 1; + } + } else { + pr_info("DPM Table Has Too Many Entries!"); + return MAX_REGULAR_DPM_NUMBER - 1; + } + + return i; +} + +static void vega10_apply_dal_minimum_voltage_request( + struct pp_hwmgr *hwmgr) +{ + return; +} + +static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + vega10_apply_dal_minimum_voltage_request(hwmgr); + + if (!data->registry_data.sclk_dpm_key_disabled) { + if (data->smc_state_table.gfx_boot_level != + data->dpm_table.gfx_table.dpm_state.soft_min_level) + data->dpm_table.gfx_table.dpm_state.soft_min_level = + data->smc_state_table.gfx_boot_level; + } + + if (!data->registry_data.mclk_dpm_key_disabled) { + if (data->smc_state_table.mem_boot_level != + data->dpm_table.mem_table.dpm_state.soft_min_level) + data->dpm_table.mem_table.dpm_state.soft_min_level = + data->smc_state_table.mem_boot_level; + } + + return 0; +} + +static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + vega10_apply_dal_minimum_voltage_request(hwmgr); + + if (!data->registry_data.sclk_dpm_key_disabled) { + if (data->smc_state_table.gfx_max_level != + data->dpm_table.gfx_table.dpm_state.soft_max_level) { + data->dpm_table.gfx_table.dpm_state.soft_max_level = + data->smc_state_table.gfx_max_level; + } + } + + if (!data->registry_data.mclk_dpm_key_disabled) { + if (data->smc_state_table.mem_max_level != + data->dpm_table.mem_table.dpm_state.soft_max_level) { + data->dpm_table.mem_table.dpm_state.soft_max_level = + data->smc_state_table.mem_max_level; + } + } + + return 0; +} + +static int vega10_generate_dpm_level_enable_mask( + struct pp_hwmgr *hwmgr, const void *input) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct vega10_power_state *vega10_ps = + cast_const_phw_vega10_power_state(states->pnew_state); + + PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps), + "Attempt to Trim DPM States Failed!", + return -1); + + data->smc_state_table.gfx_boot_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.gfx_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + 
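
The trim/boot/max sequence above works by enabling only the DPM levels inside the power state's low/high clock window (optionally filtered by a per-level mask for GFX), then taking the lowest and highest enabled levels as the boot and max levels. A minimal standalone sketch of that flow, with invented types, sizes and mask semantics, is:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_LEVELS 8

struct level { uint32_t value; bool enabled; };

static void trim_levels(struct level *t, uint32_t n,
			uint32_t low, uint32_t high, uint32_t enable_mask)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		t[i].enabled = t[i].value >= low && t[i].value <= high &&
			       ((1u << i) & enable_mask);
}

static uint32_t lowest_enabled(const struct level *t, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		if (t[i].enabled)
			break;
	return i;	/* returns n if nothing is enabled */
}

static uint32_t highest_enabled(const struct level *t, uint32_t n)
{
	uint32_t i;

	for (i = n; i > 0; i--)
		if (t[i - 1].enabled)
			return i - 1;
	return 0;
}

int main(void)
{
	struct level gfx[MAX_LEVELS] = {
		{852}, {991}, {1138}, {1269}, {1348}, {1440}, {1528}, {1600}
	};

	trim_levels(gfx, MAX_LEVELS, 991, 1528, 0xffu);
	printf("boot level %u, max level %u\n",
	       lowest_enabled(gfx, MAX_LEVELS),
	       highest_enabled(gfx, MAX_LEVELS));
	return 0;
}
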
data->smc_state_table.mem_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Attempt to upload DPM Bootup Levels Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Attempt to upload DPM Max Levels Failed!", + return -1); + + return 0; +} + +int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DPM_VCE].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + enable, + data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap), + "Attempt to Enable/Disable DPM VCE Failed!", + return -1); + data->smu_features[GNLD_DPM_VCE].enabled = enable; + } + + return 0; +} + +int vega10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) +{ + const struct phm_set_power_state_input *states = + (const struct phm_set_power_state_input *)input; + const struct vega10_power_state *vega10_nps = + cast_const_phw_vega10_power_state(states->pnew_state); + const struct vega10_power_state *vega10_cps = + cast_const_phw_vega10_power_state(states->pcurrent_state); + int result = 0; + + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEDPM)) + return 0; + + if (vega10_nps->vce_clks.evclk > 0 && + (vega10_cps == NULL || + vega10_cps->vce_clks.evclk == 0)) + result = vega10_enable_disable_vce_dpm(hwmgr, true); + else if (!vega10_nps->vce_clks.evclk && + (vega10_cps && vega10_cps->vce_clks.evclk)) + result = vega10_enable_disable_vce_dpm(hwmgr, false); + + return result; +} + +static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + int result = 0; + uint32_t low_sclk_interrupt_threshold = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkThrottleLowNotification) + && (hwmgr->gfx_arbiter.sclk_threshold != + data->low_sclk_interrupt_threshold)) { + data->low_sclk_interrupt_threshold = + hwmgr->gfx_arbiter.sclk_threshold; + low_sclk_interrupt_threshold = + data->low_sclk_interrupt_threshold; + + data->smc_state_table.pp_table.LowGfxclkInterruptThreshold = + cpu_to_le32(low_sclk_interrupt_threshold); + + /* This message will also enable SmcToHost Interrupt */ + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetLowGfxclkInterruptThreshold, + (uint32_t)low_sclk_interrupt_threshold); + } + + return result; +} + +static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, + const void *input) +{ + int tmp_result, result = 0; + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *pp_table = &(data->smc_state_table.pp_table); + + tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to find DPM states clocks in DPM table!", + result = tmp_result); + + tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to populate and upload SCLK MCLK DPM levels!", + result = tmp_result); + + tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to generate DPM level enabled mask!", + result = tmp_result); + + tmp_result = vega10_update_vce_dpm(hwmgr, input); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to update VCE DPM!", + result = tmp_result); + + tmp_result = 
vega10_update_sclk_threshold(hwmgr); + PP_ASSERT_WITH_CODE(!tmp_result, + "Failed to update SCLK threshold!", + result = tmp_result); + + result = vega10_copy_table_to_smc(hwmgr->smumgr, + (uint8_t *)pp_table, PPTABLE); + PP_ASSERT_WITH_CODE(!result, + "Failed to upload PPtable!", return result); + + data->apply_optimized_settings = false; + data->apply_overdrive_next_settings_mask = 0; + + return 0; +} + +static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct vega10_power_state *vega10_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + + if (low) + return vega10_ps->performance_levels[0].gfx_clock; + else + return vega10_ps->performance_levels + [vega10_ps->performance_level_count - 1].gfx_clock; +} + +static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low) +{ + struct pp_power_state *ps; + struct vega10_power_state *vega10_ps; + + if (hwmgr == NULL) + return -EINVAL; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + vega10_ps = cast_phw_vega10_power_state(&ps->hardware); + + if (low) + return vega10_ps->performance_levels[0].mem_clock; + else + return vega10_ps->performance_levels + [vega10_ps->performance_level_count-1].mem_clock; +} + +static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx, + void *value, int *size) +{ + uint32_t sclk_idx, mclk_idx, activity_percent = 0; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct vega10_dpm_table *dpm_table = &data->dpm_table; + int ret = 0; + + switch (idx) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex); + if (!ret) { + vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx); + *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value; + *size = 4; + } + break; + case AMDGPU_PP_SENSOR_GFX_MCLK: + ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex); + if (!ret) { + vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx); + *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value; + *size = 4; + } + break; + case AMDGPU_PP_SENSOR_GPU_LOAD: + ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity); + if (!ret) { + vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent); + + activity_percent += 0x80; + activity_percent >>= 8; + *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent; + *size = 4; + } + break; + case AMDGPU_PP_SENSOR_GPU_TEMP: + *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr); + *size = 4; + break; + case AMDGPU_PP_SENSOR_UVD_POWER: + *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1; + *size = 4; + break; + case AMDGPU_PP_SENSOR_VCE_POWER: + *((uint32_t *)value) = data->vce_power_gated ? 0 : 1; + *size = 4; + break; + default: + ret = -EINVAL; + break; + } + return ret; +} + +static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr, + bool has_disp) +{ + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetUclkFastSwitch, + has_disp ? 
0 : 1); +} + +int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr, + struct pp_display_clock_request *clock_req) +{ + int result = 0; + enum amd_pp_clock_type clk_type = clock_req->clock_type; + uint32_t clk_freq = clock_req->clock_freq_in_khz / 100; + DSPCLK_e clk_select = 0; + uint32_t clk_request = 0; + + switch (clk_type) { + case amd_pp_dcef_clock: + clk_select = DSPCLK_DCEFCLK; + break; + case amd_pp_disp_clock: + clk_select = DSPCLK_DISPCLK; + break; + case amd_pp_pixel_clock: + clk_select = DSPCLK_PIXCLK; + break; + case amd_pp_phy_clock: + clk_select = DSPCLK_PHYCLK; + break; + default: + pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!"); + result = -1; + break; + } + + if (!result) { + clk_request = (clk_freq << 16) | clk_select; + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_RequestDisplayClockByFreq, + clk_request); + } + + return result; +} + +static int vega10_notify_smc_display_config_after_ps_adjustment( + struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + struct vega10_single_dpm_table *dpm_table = + &data->dpm_table.dcef_table; + uint32_t num_active_disps = 0; + struct cgs_display_info info = {0}; + struct PP_Clocks min_clocks = {0}; + uint32_t i; + struct pp_display_clock_request clock_req; + + info.mode_info = NULL; + + cgs_get_active_displays_info(hwmgr->device, &info); + + num_active_disps = info.display_count; + + if (num_active_disps > 1) + vega10_notify_smc_display_change(hwmgr, false); + else + vega10_notify_smc_display_change(hwmgr, true); + + min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk; + min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk; + + for (i = 0; i < dpm_table->count; i++) { + if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock) + break; + } + + if (i < dpm_table->count) { + clock_req.clock_type = amd_pp_dcef_clock; + clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value; + if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) { + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk, + min_clocks.dcefClockInSR), + "Attempt to set divider for DCEFCLK Failed!",); + } else + pr_info("Attempt to set Hard Min for DCEFCLK Failed!"); + } else + pr_info("Cannot find requested DCEFCLK!"); + + return 0; +} + +static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + data->smc_state_table.gfx_boot_level = + data->smc_state_table.gfx_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + data->smc_state_table.mem_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload boot level to highest!", + return -1); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload dpm max level to highest!", + return -1); + + return 0; +} + +static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + data->smc_state_table.gfx_boot_level = + data->smc_state_table.gfx_max_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + data->smc_state_table.mem_max_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + + 
PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload boot level to lowest!", + return -1); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload dpm max level to lowest!", + return -1); + + return 0; + +} + +static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + data->smc_state_table.gfx_boot_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.gfx_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table)); + data->smc_state_table.mem_boot_level = + vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table)); + data->smc_state_table.mem_max_level = + vega10_find_highest_dpm_level(&(data->dpm_table.mem_table)); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr), + "Failed to upload DPM Bootup Levels!", + return -1); + + PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr), + "Failed to upload DPM Max Levels!", + return -1); + return 0; +} + +static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, + enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_HIGH: + ret = vega10_force_dpm_highest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_LOW: + ret = vega10_force_dpm_lowest(hwmgr); + if (ret) + return ret; + break; + case AMD_DPM_FORCED_LEVEL_AUTO: + ret = vega10_unforce_dpm_levels(hwmgr); + if (ret) + return ret; + break; + default: + break; + } + + hwmgr->dpm_level = level; + + return ret; +} + +static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + if (mode) { + /* stop auto-manage */ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + vega10_fan_ctrl_set_static_mode(hwmgr, mode); + } else + /* restart auto-manage */ + vega10_fan_ctrl_reset_fan_speed_to_default(hwmgr); + + return 0; +} + +static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + + if (hwmgr->fan_ctrl_is_in_default_mode) + return hwmgr->fan_ctrl_default_mode; + else + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + return (cgs_read_register(hwmgr->device, reg) & + CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> + CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; +} + +static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr, + struct amd_pp_simple_clock_info *info) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_clock_and_voltage_limits *max_limits = + &table_info->max_clock_voltage_on_ac; + + info->engine_max_clock = max_limits->sclk; + info->memory_max_clock = max_limits->mclk; + + return 0; +} + +static void vega10_get_sclks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_sclk; + uint32_t i; + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = + dep_table->entries[i].clk; + clocks->num_levels++; + } + } + +} + +static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr, + uint32_t clock) +{ + if (clock >= MEM_FREQ_LOW_LATENCY && + clock < MEM_FREQ_HIGH_LATENCY) + return MEM_LATENCY_HIGH; + else if (clock >=
MEM_FREQ_HIGH_LATENCY) + return MEM_LATENCY_LOW; + else + return MEM_LATENCY_ERR; +} + +static void vega10_get_memclocks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_mclk; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t i; + + clocks->num_levels = 0; + data->mclk_latency_table.count = 0; + + for (i = 0; i < dep_table->count; i++) { + if (dep_table->entries[i].clk) { + clocks->data[clocks->num_levels].clocks_in_khz = + data->mclk_latency_table.entries + [data->mclk_latency_table.count].frequency = + dep_table->entries[i].clk; + clocks->data[clocks->num_levels].latency_in_us = + data->mclk_latency_table.entries + [data->mclk_latency_table.count].latency = + vega10_get_mem_latency(hwmgr, + dep_table->entries[i].clk); + clocks->num_levels++; + data->mclk_latency_table.count++; + } + } +} + +static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_dcefclk; + uint32_t i; + + for (i = 0; i < dep_table->count; i++) { + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].latency_in_us = 0; + clocks->num_levels++; + } +} + +static void vega10_get_socclocks(struct pp_hwmgr *hwmgr, + struct pp_clock_levels_with_latency *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table = + table_info->vdd_dep_on_socclk; + uint32_t i; + + for (i = 0; i < dep_table->count; i++) { + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].latency_in_us = 0; + clocks->num_levels++; + } +} + +static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks) +{ + switch (type) { + case amd_pp_sys_clock: + vega10_get_sclks(hwmgr, clocks); + break; + case amd_pp_mem_clock: + vega10_get_memclocks(hwmgr, clocks); + break; + case amd_pp_dcef_clock: + vega10_get_dcefclocks(hwmgr, clocks); + break; + case amd_pp_soc_clock: + vega10_get_socclocks(hwmgr, clocks); + break; + default: + return -1; + } + + return 0; +} + +static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks) +{ + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *dep_table; + uint32_t i; + + switch (type) { + case amd_pp_mem_clock: + dep_table = table_info->vdd_dep_on_mclk; + break; + case amd_pp_dcef_clock: + dep_table = table_info->vdd_dep_on_dcefclk; + break; + case amd_pp_disp_clock: + dep_table = table_info->vdd_dep_on_dispclk; + break; + case amd_pp_pixel_clock: + dep_table = table_info->vdd_dep_on_pixclk; + break; + case amd_pp_phy_clock: + dep_table = table_info->vdd_dep_on_phyclk; + break; + default: + return -1; + } + + for (i = 0; i < dep_table->count; i++) { + clocks->data[i].clocks_in_khz = dep_table->entries[i].clk; + clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table-> + entries[dep_table->entries[i].vddInd].us_vdd); + 
clocks->num_levels++; + } + + if (i < dep_table->count) + return -1; + + return 0; +} + +static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, + struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + Watermarks_t *table = &(data->smc_state_table.water_marks_table); + int result = 0; + uint32_t i; + + if (!data->registry_data.disable_water_mark) { + for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) { + table->WatermarkRow[WM_DCEFCLK][i].MinClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) / + 100); + table->WatermarkRow[WM_DCEFCLK][i].MaxClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) / + 100); + table->WatermarkRow[WM_DCEFCLK][i].MinUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) / + 100); + table->WatermarkRow[WM_DCEFCLK][i].MaxUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) / + 100); + table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t) + wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id; + } + + for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) { + table->WatermarkRow[WM_SOCCLK][i].MinClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) / + 100); + table->WatermarkRow[WM_SOCCLK][i].MaxClock = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) / + 100); + table->WatermarkRow[WM_SOCCLK][i].MinUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) / + 100); + table->WatermarkRow[WM_SOCCLK][i].MaxUclk = + cpu_to_le16((uint16_t) + (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) / + 100); + table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t) + wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id; + } + data->water_marks_bitmap = WaterMarksExist; + } + + return result; +} + +static int vega10_force_clock_level(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, uint32_t mask) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t i; + + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (data->registry_data.sclk_dpm_key_disabled) + break; + + for (i = 0; i < 32; i++) { + if (mask & (1 << i)) + break; + } + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetSoftMinGfxclkByIndex, + i), + "Failed to set soft min sclk index!", + return -1); + break; + + case PP_MCLK: + if (data->registry_data.mclk_dpm_key_disabled) + break; + + for (i = 0; i < 32; i++) { + if (mask & (1 << i)) + break; + } + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetSoftMinUclkByIndex, + i), + "Failed to set soft min mclk index!", + return -1); + break; + + case PP_PCIE: + if (data->registry_data.pcie_dpm_key_disabled) + break; + + for (i = 0; i < 32; i++) { + if (mask & (1 << i)) + break; + } + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter( + hwmgr->smumgr, + PPSMC_MSG_SetMinLinkDpmByIndex, + i), + "Failed to set min pcie index!", + return -1); + break; + default: + break; + } + + return 0; +} + +static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, + enum pp_clock_type type, char *buf) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct 
vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); + struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); + struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); + int i, now, size = 0; + + switch (type) { + case PP_SCLK: + if (data->registry_data.sclk_dpm_key_disabled) + break; + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_GetCurrentGfxclkIndex), + "Attempt to get current sclk index Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + &now), + "Attempt to read sclk index Failed!", + return -1); + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + if (data->registry_data.mclk_dpm_key_disabled) + break; + + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_GetCurrentUclkIndex), + "Attempt to get current mclk index Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + &now), + "Attempt to read mclk index Failed!", + return -1); + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_PCIE: + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_GetCurrentLinkIndex), + "Attempt to get current pcie index Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + &now), + "Attempt to read pcie index Failed!", + return -1); + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->pcie_gen[i] == 0) ? "2.5GB, x1" : + (pcie_table->pcie_gen[i] == 1) ? "5.0GB, x16" : + (pcie_table->pcie_gen[i] == 2) ? "8.0GB, x16" : "", + (i == now) ?
"*" : ""); + break; + default: + break; + } + return size; +} + +static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + int result = 0; + uint32_t num_turned_on_displays = 1; + Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table); + struct cgs_display_info info = {0}; + + cgs_get_active_displays_info(hwmgr->device, &info); + num_turned_on_displays = info.display_count; + + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_NumOfDisplays, num_turned_on_displays); + + if ((data->water_marks_bitmap & WaterMarksExist) && + !(data->water_marks_bitmap & WaterMarksLoaded)) { + result = vega10_copy_table_to_smc(hwmgr->smumgr, + (uint8_t *)wm_table, WMTABLE); + PP_ASSERT_WITH_CODE(!result, "Failed to update WMTABLE!", return -EINVAL); + data->water_marks_bitmap |= WaterMarksLoaded; + } + + return result; +} + +int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_DPM_UVD].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + enable, + data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap), + "Attempt to Enable/Disable DPM UVD Failed!", + return -1); + data->smu_features[GNLD_DPM_UVD].enabled = enable; + } + return 0; +} + +static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + data->vce_power_gated = bgate; + return vega10_enable_disable_vce_dpm(hwmgr, !bgate); +} + +static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + data->uvd_power_gated = bgate; + return vega10_enable_disable_uvd_dpm(hwmgr, !bgate); +} + + +static const struct pp_hwmgr_func vega10_hwmgr_funcs = { + .backend_init = vega10_hwmgr_backend_init, + .backend_fini = vega10_hwmgr_backend_fini, + .asic_setup = vega10_setup_asic_task, + .dynamic_state_management_enable = vega10_enable_dpm_tasks, + .get_num_of_pp_table_entries = + vega10_get_number_of_powerplay_table_entries, + .get_power_state_size = vega10_get_power_state_size, + .get_pp_table_entry = vega10_get_pp_table_entry, + .patch_boot_state = vega10_patch_boot_state, + .apply_state_adjust_rules = vega10_apply_state_adjust_rules, + .power_state_set = vega10_set_power_state_tasks, + .get_sclk = vega10_dpm_get_sclk, + .get_mclk = vega10_dpm_get_mclk, + .notify_smc_display_config_after_ps_adjustment = + vega10_notify_smc_display_config_after_ps_adjustment, + .force_dpm_level = vega10_dpm_force_dpm_level, + .get_temperature = vega10_thermal_get_temperature, + .stop_thermal_controller = vega10_thermal_stop_thermal_controller, + .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info, + .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent, + .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent, + .reset_fan_speed_to_default = + vega10_fan_ctrl_reset_fan_speed_to_default, + .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm, + .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm, + .uninitialize_thermal_controller = + vega10_thermal_ctrl_uninitialize_thermal_controller, + .set_fan_control_mode = vega10_set_fan_control_mode, + .get_fan_control_mode = vega10_get_fan_control_mode, + .read_sensor = vega10_read_sensor, + .get_dal_power_level = vega10_get_dal_power_level, + .get_clock_by_type_with_latency =
vega10_get_clock_by_type_with_latency, + .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage, + .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges, + .display_clock_voltage_request = vega10_display_clock_voltage_request, + .force_clock_level = vega10_force_clock_level, + .print_clock_levels = vega10_print_clock_levels, + .display_config_changed = vega10_display_configuration_changed_task, + .powergate_uvd = vega10_power_gate_uvd, + .powergate_vce = vega10_power_gate_vce, +}; + +int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) +{ + hwmgr->hwmgr_func = &vega10_hwmgr_funcs; + hwmgr->pptable_func = &vega10_pptable_funcs; + pp_vega10_thermal_initialize(hwmgr); + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h new file mode 100644 index 0000000..83c67b9 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -0,0 +1,434 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef _VEGA10_HWMGR_H_ +#define _VEGA10_HWMGR_H_ + +#include "hwmgr.h" +#include "smu9_driver_if.h" +#include "ppatomctrl.h" +#include "ppatomfwctrl.h" +#include "vega10_ppsmc.h" +#include "vega10_powertune.h" + +extern const uint32_t PhwVega10_Magic; +#define VEGA10_MAX_HARDWARE_POWERLEVELS 2 + +#define WaterMarksExist 1 +#define WaterMarksLoaded 2 + +enum { + GNLD_DPM_PREFETCHER = 0, + GNLD_DPM_GFXCLK, + GNLD_DPM_UCLK, + GNLD_DPM_SOCCLK, + GNLD_DPM_UVD, + GNLD_DPM_VCE, + GNLD_ULV, + GNLD_DPM_MP0CLK, + GNLD_DPM_LINK, + GNLD_DPM_DCEFCLK, + GNLD_AVFS, + GNLD_DS_GFXCLK, + GNLD_DS_SOCCLK, + GNLD_DS_LCLK, + GNLD_PPT, + GNLD_TDC, + GNLD_THERMAL, + GNLD_GFX_PER_CU_CG, + GNLD_RM, + GNLD_DS_DCEFCLK, + GNLD_ACDC, + GNLD_VR0HOT, + GNLD_VR1HOT, + GNLD_FW_CTF, + GNLD_LED_DISPLAY, + GNLD_FAN_CONTROL, + GNLD_VOLTAGE_CONTROLLER, + GNLD_FEATURES_MAX +}; + +#define GNLD_DPM_MAX (GNLD_DPM_DCEFCLK + 1) + +#define SMC_DPM_FEATURES 0x30F + +struct smu_features { + bool supported; + bool enabled; + uint32_t smu_feature_id; + uint32_t smu_feature_bitmap; +}; + +struct vega10_performance_level { + uint32_t soc_clock; + uint32_t gfx_clock; + uint32_t mem_clock; +}; + +struct vega10_bacos { + uint32_t baco_flags; + /* struct vega10_performance_level performance_level; */ +}; + +struct vega10_uvd_clocks { + uint32_t vclk; + uint32_t dclk; +}; + +struct vega10_vce_clocks { + uint32_t evclk; + uint32_t ecclk; +}; + +struct vega10_power_state { + uint32_t magic; + struct vega10_uvd_clocks uvd_clks; + struct vega10_vce_clocks vce_clks; + uint16_t performance_level_count; + bool dc_compatible; + uint32_t sclk_threshold; + struct vega10_performance_level performance_levels[VEGA10_MAX_HARDWARE_POWERLEVELS]; +}; + +struct vega10_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; + +#define VEGA10_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define MAX_REGULAR_DPM_NUMBER 8 +#define MAX_PCIE_CONF 2 +#define VEGA10_MINIMUM_ENGINE_CLOCK 2500 + +struct vega10_dpm_state { + uint32_t soft_min_level; + uint32_t soft_max_level; + uint32_t hard_min_level; + uint32_t hard_max_level; +}; + +struct vega10_single_dpm_table { + uint32_t count; + struct vega10_dpm_state dpm_state; + struct vega10_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega10_pcie_table { + uint16_t count; + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; + uint32_t lclk[MAX_PCIE_CONF]; +}; + +struct vega10_dpm_table { + struct vega10_single_dpm_table soc_table; + struct vega10_single_dpm_table gfx_table; + struct vega10_single_dpm_table mem_table; + struct vega10_single_dpm_table eclk_table; + struct vega10_single_dpm_table vclk_table; + struct vega10_single_dpm_table dclk_table; + struct vega10_single_dpm_table dcef_table; + struct vega10_single_dpm_table pixel_table; + struct vega10_single_dpm_table display_table; + struct vega10_single_dpm_table phy_table; + struct vega10_pcie_table pcie_table; +}; + +#define VEGA10_MAX_LEAKAGE_COUNT 8 +struct vega10_leakage_voltage { + uint16_t count; + uint16_t leakage_id[VEGA10_MAX_LEAKAGE_COUNT]; + uint16_t actual_voltage[VEGA10_MAX_LEAKAGE_COUNT]; +}; + +struct vega10_display_timing { + uint32_t min_clock_in_sr; + uint32_t num_existing_displays; +}; + +struct vega10_dpmlevel_enable_mask { + uint32_t uvd_dpm_enable_mask; + uint32_t vce_dpm_enable_mask; + uint32_t acp_dpm_enable_mask; + uint32_t samu_dpm_enable_mask; + uint32_t sclk_dpm_enable_mask; + uint32_t mclk_dpm_enable_mask; +}; + +struct vega10_vbios_boot_state { + uint16_t vddc; + uint16_t vddci; + uint32_t gfx_clock; 
+ uint32_t mem_clock; + uint32_t soc_clock; +}; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 +#define DPMTABLE_OD_UPDATE_VDDC 0x00000010 + +struct vega10_smc_state_table { + uint32_t soc_boot_level; + uint32_t gfx_boot_level; + uint32_t dcef_boot_level; + uint32_t mem_boot_level; + uint32_t uvd_boot_level; + uint32_t vce_boot_level; + uint32_t gfx_max_level; + uint32_t mem_max_level; + uint8_t vr_hot_gpio; + uint8_t ac_dc_gpio; + uint8_t therm_out_gpio; + uint8_t therm_out_polarity; + uint8_t therm_out_mode; + PPTable_t pp_table; + Watermarks_t water_marks_table; + AvfsTable_t avfs_table; +}; + +struct vega10_mclk_latency_entries { + uint32_t frequency; + uint32_t latency; +}; + +struct vega10_mclk_latency_table { + uint32_t count; + struct vega10_mclk_latency_entries entries[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega10_registry_data { + uint8_t ac_dc_switch_gpio_support; + uint8_t avfs_support; + uint8_t cac_support; + uint8_t clock_stretcher_support; + uint8_t db_ramping_support; + uint8_t didt_support; + uint8_t dynamic_state_patching_support; + uint8_t enable_pkg_pwr_tracking_feature; + uint8_t enable_tdc_limit_feature; + uint32_t fast_watermark_threshold; + uint8_t force_dpm_high; + uint8_t fuzzy_fan_control_support; + uint8_t long_idle_baco_support; + uint8_t mclk_dpm_key_disabled; + uint8_t od_state_in_dc_support; + uint8_t pcieLaneOverride; + uint8_t pcieSpeedOverride; + uint32_t pcieClockOverride; + uint8_t pcie_dpm_key_disabled; + uint8_t dcefclk_dpm_key_disabled; + uint8_t power_containment_support; + uint8_t ppt_support; + uint8_t prefetcher_dpm_key_disabled; + uint8_t quick_transition_support; + uint8_t regulator_hot_gpio_support; + uint8_t sclk_deep_sleep_support; + uint8_t sclk_dpm_key_disabled; + uint8_t sclk_from_vbios; + uint8_t sclk_throttle_low_notification; + uint8_t show_baco_dbg_info; + uint8_t skip_baco_hardware; + uint8_t socclk_dpm_key_disabled; + uint8_t spll_shutdown_support; + uint8_t sq_ramping_support; + uint32_t stable_pstate_sclk_dpm_percentage; + uint8_t tcp_ramping_support; + uint8_t tdc_support; + uint8_t td_ramping_support; + uint8_t thermal_out_gpio_support; + uint8_t thermal_support; + uint8_t fw_ctf_enabled; + uint8_t fan_control_support; + uint8_t ulps_support; + uint8_t ulv_support; + uint32_t vddc_vddci_delta; + uint8_t odn_feature_enable; + uint8_t disable_water_mark; + uint8_t zrpm_stop_temp; + uint8_t zrpm_start_temp; + uint8_t led_dpm_enabled; + uint8_t vr0hot_enabled; + uint8_t vr1hot_enabled; +}; + +struct vega10_odn_clock_voltage_dependency_table { + uint32_t count; + struct phm_ppt_v1_clock_voltage_dependency_record + entries[MAX_REGULAR_DPM_NUMBER]; +}; + +struct vega10_odn_dpm_table { + struct phm_odn_clock_levels odn_core_clock_dpm_levels; + struct phm_odn_clock_levels odn_memory_clock_dpm_levels; + struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_sclk; + struct vega10_odn_clock_voltage_dependency_table vdd_dependency_on_mclk; +}; + +struct vega10_odn_fan_table { + uint32_t target_fan_speed; + uint32_t target_temperature; + uint32_t min_performance_clock; + uint32_t min_fan_limit; +}; + +struct vega10_hwmgr { + struct vega10_dpm_table dpm_table; + struct vega10_dpm_table golden_dpm_table; + struct vega10_registry_data registry_data; + struct vega10_vbios_boot_state vbios_boot_state; + struct vega10_mclk_latency_table mclk_latency_table; + + struct vega10_leakage_voltage vddc_leakage; + + 
uint32_t vddc_control; + struct pp_atomfwctrl_voltage_table vddc_voltage_table; + uint32_t mvdd_control; + struct pp_atomfwctrl_voltage_table mvdd_voltage_table; + uint32_t vddci_control; + struct pp_atomfwctrl_voltage_table vddci_voltage_table; + + uint32_t active_auto_throttle_sources; + uint32_t water_marks_bitmap; + struct vega10_bacos bacos; + + struct vega10_odn_dpm_table odn_dpm_table; + struct vega10_odn_fan_table odn_fan_table; + + /* ---- General data ---- */ + uint8_t need_update_dpm_table; + + bool cac_enabled; + bool battery_state; + bool is_tlu_enabled; + + uint32_t low_sclk_interrupt_threshold; + + uint32_t total_active_cus; + + struct vega10_display_timing display_timing; + + /* ---- Vega10 Dyn Register Settings ---- */ + + uint32_t debug_settings; + uint32_t lowest_uclk_reserved_for_ulv; + uint32_t gfxclk_average_alpha; + uint32_t socclk_average_alpha; + uint32_t uclk_average_alpha; + uint32_t gfx_activity_average_alpha; + uint32_t display_voltage_mode; + uint32_t dcef_clk_quad_eqn_a; + uint32_t dcef_clk_quad_eqn_b; + uint32_t dcef_clk_quad_eqn_c; + uint32_t disp_clk_quad_eqn_a; + uint32_t disp_clk_quad_eqn_b; + uint32_t disp_clk_quad_eqn_c; + uint32_t pixel_clk_quad_eqn_a; + uint32_t pixel_clk_quad_eqn_b; + uint32_t pixel_clk_quad_eqn_c; + uint32_t phy_clk_quad_eqn_a; + uint32_t phy_clk_quad_eqn_b; + uint32_t phy_clk_quad_eqn_c; + + /* ---- Thermal Temperature Setting ---- */ + struct vega10_dpmlevel_enable_mask dpm_level_enable_mask; + + /* ---- Power Gating States ---- */ + bool uvd_power_gated; + bool vce_power_gated; + bool samu_power_gated; + bool need_long_memory_training; + + /* Internal settings to apply the application power optimization parameters */ + bool apply_optimized_settings; + uint32_t disable_dpm_mask; + + /* ---- Overdrive next setting ---- */ + uint32_t apply_overdrive_next_settings_mask; + + /* ---- Workload Mask ---- */ + uint32_t workload_mask; + + /* ---- SMU9 ---- */ + struct smu_features smu_features[GNLD_FEATURES_MAX]; + struct vega10_smc_state_table smc_state_table; + + uint32_t config_telemetry; +}; + +#define VEGA10_DPM2_NEAR_TDP_DEC 10 +#define VEGA10_DPM2_ABOVE_SAFE_INC 5 +#define VEGA10_DPM2_BELOW_SAFE_INC 20 + +#define VEGA10_DPM2_LTA_WINDOW_SIZE 7 + +#define VEGA10_DPM2_LTS_TRUNCATE 0 + +#define VEGA10_DPM2_TDP_SAFE_LIMIT_PERCENT 80 + +#define VEGA10_DPM2_MAXPS_PERCENT_M 90 +#define VEGA10_DPM2_MAXPS_PERCENT_H 90 + +#define VEGA10_DPM2_PWREFFICIENCYRATIO_MARGIN 50 + +#define VEGA10_DPM2_SQ_RAMP_MAX_POWER 0x3FFF +#define VEGA10_DPM2_SQ_RAMP_MIN_POWER 0x12 +#define VEGA10_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 +#define VEGA10_DPM2_SQ_RAMP_SHORT_TERM_INTERVAL_SIZE 0x1E +#define VEGA10_DPM2_SQ_RAMP_LONG_TERM_INTERVAL_RATIO 0xF + +#define VEGA10_VOLTAGE_CONTROL_NONE 0x0 +#define VEGA10_VOLTAGE_CONTROL_BY_GPIO 0x1 +#define VEGA10_VOLTAGE_CONTROL_BY_SVID2 0x2 +#define VEGA10_VOLTAGE_CONTROL_MERGED 0x3 +/* To convert to Q8.8 format for firmware */ +#define VEGA10_Q88_FORMAT_CONVERSION_UNIT 256 + +#define VEGA10_UNUSED_GPIO_PIN 0x7F + +#define VEGA10_THERM_OUT_MODE_DISABLE 0x0 +#define VEGA10_THERM_OUT_MODE_THERM_ONLY 0x1 +#define VEGA10_THERM_OUT_MODE_THERM_VRHOT 0x2 + +#define PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT 0xffffffff +#define PPREGKEY_VEGA10QUADRATICEQUATION_DFLT 0xffffffff + +#define PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ +#define 
PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT 25 /* 10% * 255 = 25 */ + +extern int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr); +extern int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr); +extern int tonga_get_mc_microcode_version (struct pp_hwmgr *hwmgr); +extern int tonga_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); +extern int tonga_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display); +int vega10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input); +int vega10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int vega10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int vega10_update_acp_dpm(struct pp_hwmgr *hwmgr, bool bgate); +int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); + +#endif /* _VEGA10_HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h new file mode 100644 index 0000000..8c55eaa --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_inc.h @@ -0,0 +1,44 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef VEGA10_INC_H +#define VEGA10_INC_H + +#include "asic_reg/vega10/THM/thm_9_0_default.h" +#include "asic_reg/vega10/THM/thm_9_0_offset.h" +#include "asic_reg/vega10/THM/thm_9_0_sh_mask.h" + +#include "asic_reg/vega10/MP/mp_9_0_default.h" +#include "asic_reg/vega10/MP/mp_9_0_offset.h" +#include "asic_reg/vega10/MP/mp_9_0_sh_mask.h" + +#include "asic_reg/vega10/GC/gc_9_0_default.h" +#include "asic_reg/vega10/GC/gc_9_0_offset.h" +#include "asic_reg/vega10/GC/gc_9_0_sh_mask.h" + +#include "asic_reg/vega10/NBIO/nbio_6_1_default.h" +#include "asic_reg/vega10/NBIO/nbio_6_1_offset.h" +#include "asic_reg/vega10/NBIO/nbio_6_1_sh_mask.h" + + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c new file mode 100644 index 0000000..f1e244c --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c @@ -0,0 +1,137 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "hwmgr.h" +#include "vega10_hwmgr.h" +#include "vega10_powertune.h" +#include "vega10_smumgr.h" +#include "vega10_ppsmc.h" +#include "pp_debug.h" + +void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = table_info->tdp_table; + PPTable_t *table = &(data->smc_state_table.pp_table); + + table->SocketPowerLimit = cpu_to_le16( + tdp_table->usMaximumPowerDeliveryLimit); + table->TdcLimit = cpu_to_le16(tdp_table->usTDC); + table->EdcLimit = cpu_to_le16(tdp_table->usEDCLimit); + table->TedgeLimit = cpu_to_le16(tdp_table->usTemperatureLimitTedge); + table->ThotspotLimit = cpu_to_le16(tdp_table->usTemperatureLimitHotspot); + table->ThbmLimit = cpu_to_le16(tdp_table->usTemperatureLimitHBM); + table->Tvr_socLimit = cpu_to_le16(tdp_table->usTemperatureLimitVrVddc); + table->Tvr_memLimit = cpu_to_le16(tdp_table->usTemperatureLimitVrMvdd); + table->Tliquid1Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid1); + table->Tliquid2Limit = cpu_to_le16(tdp_table->usTemperatureLimitLiquid2); + table->TplxLimit = cpu_to_le16(tdp_table->usTemperatureLimitPlx); + table->LoadLineResistance = cpu_to_le16( + hwmgr->platform_descriptor.LoadLineSlope); + table->FitLimit = 0; /* Not used for Vega10 */ + + table->Liquid1_I2C_address = tdp_table->ucLiquid1_I2C_address; + table->Liquid2_I2C_address = tdp_table->ucLiquid2_I2C_address; + table->Vr_I2C_address = tdp_table->ucVr_I2C_address; + table->Plx_I2C_address = tdp_table->ucPlx_I2C_address; + + table->Liquid_I2C_LineSCL = tdp_table->ucLiquid_I2C_Line; + table->Liquid_I2C_LineSDA = tdp_table->ucLiquid_I2C_LineSDA; + + table->Vr_I2C_LineSCL = tdp_table->ucVr_I2C_Line; + table->Vr_I2C_LineSDA = tdp_table->ucVr_I2C_LineSDA; + + table->Plx_I2C_LineSCL = tdp_table->ucPlx_I2C_Line; + table->Plx_I2C_LineSDA = tdp_table->ucPlx_I2C_LineSDA; +} + +int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) +{ + struct vega10_hwmgr *data = + (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->registry_data.enable_pkg_pwr_tracking_feature) + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetPptLimit, n); + + return 0; +} + +int vega10_enable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = + (struct 
vega10_hwmgr *)(hwmgr->backend); + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + struct phm_tdp_table *tdp_table = table_info->tdp_table; + uint32_t default_pwr_limit = + (uint32_t)(tdp_table->usMaximumPowerDeliveryLimit); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + if (data->smu_features[GNLD_PPT].supported) + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_PPT].smu_feature_bitmap), + "Attempt to enable PPT feature Failed!", + data->smu_features[GNLD_PPT].supported = false); + + if (data->smu_features[GNLD_TDC].supported) + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, data->smu_features[GNLD_TDC].smu_feature_bitmap), + "Attempt to enable TDC feature Failed!", + data->smu_features[GNLD_TDC].supported = false); + + result = vega10_set_power_limit(hwmgr, default_pwr_limit); + PP_ASSERT_WITH_CODE(!result, + "Failed to set Default Power Limit in SMC!", + return result); + } + + return result; +} + +static int vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr, + uint32_t adjust_percent) +{ + return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_OverDriveSetPercentage, adjust_percent); +} + +int vega10_power_control_set_level(struct pp_hwmgr *hwmgr) +{ + int adjust_percent, result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment)) { + adjust_percent = + hwmgr->platform_descriptor.TDPAdjustmentPolarity ? + hwmgr->platform_descriptor.TDPAdjustment : + (-1 * hwmgr->platform_descriptor.TDPAdjustment); + result = vega10_set_overdrive_target_percentage(hwmgr, + (uint32_t)adjust_percent); + } + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h new file mode 100644 index 0000000..d9662bf --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.h @@ -0,0 +1,65 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ +#ifndef _VEGA10_POWERTUNE_H_ +#define _VEGA10_POWERTUNE_H_ + +enum vega10_pt_config_reg_type { + VEGA10_CONFIGREG_MMR = 0, + VEGA10_CONFIGREG_SMC_IND, + VEGA10_CONFIGREG_DIDT_IND, + VEGA10_CONFIGREG_CACHE, + VEGA10_CONFIGREG_MAX +}; + +/* PowerContainment Features */ +#define POWERCONTAINMENT_FEATURE_DTE 0x00000001 +#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 +#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 + +struct vega10_pt_config_reg { + uint32_t offset; + uint32_t mask; + uint32_t shift; + uint32_t value; + enum vega10_pt_config_reg_type type; +}; + +struct vega10_pt_defaults { + uint8_t SviLoadLineEn; + uint8_t SviLoadLineVddC; + uint8_t TDC_VDDC_ThrottleReleaseLimitPerc; + uint8_t TDC_MAWt; + uint8_t TdcWaterfallCtl; + uint8_t DTEAmbientTempBase; +}; + +void vega10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); +int vega10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); +int vega10_populate_pm_fuses(struct pp_hwmgr *hwmgr); +int vega10_enable_smc_cac(struct pp_hwmgr *hwmgr); +int vega10_enable_power_containment(struct pp_hwmgr *hwmgr); +int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); +int vega10_power_control_set_level(struct pp_hwmgr *hwmgr); + +#endif /* _VEGA10_POWERTUNE_H_ */ + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h new file mode 100644 index 0000000..8e53d3a --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h @@ -0,0 +1,331 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _VEGA10_PPTABLE_H_ +#define _VEGA10_PPTABLE_H_ + +#pragma pack(push, 1) + +#define ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f +#define ATOM_VEGA10_PP_FANPARAMETERS_NOFAN 0x80 + +#define ATOM_VEGA10_PP_THERMALCONTROLLER_NONE 0 +#define ATOM_VEGA10_PP_THERMALCONTROLLER_LM96163 17 +#define ATOM_VEGA10_PP_THERMALCONTROLLER_VEGA10 24 + +#define ATOM_VEGA10_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 +#define ATOM_VEGA10_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D + +#define ATOM_VEGA10_PP_PLATFORM_CAP_POWERPLAY 0x1 +#define ATOM_VEGA10_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 +#define ATOM_VEGA10_PP_PLATFORM_CAP_HARDWAREDC 0x4 +#define ATOM_VEGA10_PP_PLATFORM_CAP_BACO 0x8 +#define ATOM_VEGA10_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL 0x10 + + +/* ATOM_PPLIB_NONCLOCK_INFO::usClassification */ +#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 +#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0 +#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1 +#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3 +#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5 +/* 2, 4, 6, 7 are reserved */ + +#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008 +#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010 +#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020 +#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040 +#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080 +#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 + +/* ATOM_PPLIB_NONCLOCK_INFO::usClassification2 */ +#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001 + +#define ATOM_Vega10_DISALLOW_ON_DC 0x00004000 +#define ATOM_Vega10_ENABLE_VARIBRIGHT 0x00008000 + +#define ATOM_Vega10_TABLE_REVISION_VEGA10 8 + +#define ATOM_Vega10_VoltageMode_AVFS_Interpolate 0 +#define ATOM_Vega10_VoltageMode_AVFS_WorstCase 1 +#define ATOM_Vega10_VoltageMode_Static 2 + +typedef struct _ATOM_Vega10_POWERPLAYTABLE { + struct atom_common_table_header sHeader; + UCHAR ucTableRevision; + USHORT usTableSize; /* the size of header structure */ + ULONG ulGoldenPPID; /* PPGen use only */ + ULONG ulGoldenRevision; /* PPGen use only */ + USHORT usFormatID; /* PPGen use only */ + ULONG ulPlatformCaps; /* See ATOM_Vega10_CAPS_* */ + ULONG ulMaxODEngineClock; /* For Overdrive. */ + ULONG ulMaxODMemoryClock; /* For Overdrive. 
*/ + USHORT usPowerControlLimit; + USHORT usUlvVoltageOffset; /* in mv units */ + USHORT usUlvSmnclkDid; + USHORT usUlvMp1clkDid; + USHORT usUlvGfxclkBypass; + USHORT usGfxclkSlewRate; + UCHAR ucGfxVoltageMode; + UCHAR ucSocVoltageMode; + UCHAR ucUclkVoltageMode; + UCHAR ucUvdVoltageMode; + UCHAR ucVceVoltageMode; + UCHAR ucMp0VoltageMode; + UCHAR ucDcefVoltageMode; + USHORT usStateArrayOffset; /* points to ATOM_Vega10_State_Array */ + USHORT usFanTableOffset; /* points to ATOM_Vega10_Fan_Table */ + USHORT usThermalControllerOffset; /* points to ATOM_Vega10_Thermal_Controller */ + USHORT usSocclkDependencyTableOffset; /* points to ATOM_Vega10_SOCCLK_Dependency_Table */ + USHORT usMclkDependencyTableOffset; /* points to ATOM_Vega10_MCLK_Dependency_Table */ + USHORT usGfxclkDependencyTableOffset; /* points to ATOM_Vega10_GFXCLK_Dependency_Table */ + USHORT usDcefclkDependencyTableOffset; /* points to ATOM_Vega10_DCEFCLK_Dependency_Table */ + USHORT usVddcLookupTableOffset; /* points to ATOM_Vega10_Voltage_Lookup_Table */ + USHORT usVddmemLookupTableOffset; /* points to ATOM_Vega10_Voltage_Lookup_Table */ + USHORT usMMDependencyTableOffset; /* points to ATOM_Vega10_MM_Dependency_Table */ + USHORT usVCEStateTableOffset; /* points to ATOM_Vega10_VCE_State_Table */ + USHORT usReserve; /* No PPM Support for Vega10 */ + USHORT usPowerTuneTableOffset; /* points to ATOM_Vega10_PowerTune_Table */ + USHORT usHardLimitTableOffset; /* points to ATOM_Vega10_Hard_Limit_Table */ + USHORT usVddciLookupTableOffset; /* points to ATOM_Vega10_Voltage_Lookup_Table */ + USHORT usPCIETableOffset; /* points to ATOM_Vega10_PCIE_Table */ + USHORT usPixclkDependencyTableOffset; /* points to ATOM_Vega10_PIXCLK_Dependency_Table */ + USHORT usDispClkDependencyTableOffset; /* points to ATOM_Vega10_DISPCLK_Dependency_Table */ + USHORT usPhyClkDependencyTableOffset; /* points to ATOM_Vega10_PHYCLK_Dependency_Table */ +} ATOM_Vega10_POWERPLAYTABLE; + +typedef struct _ATOM_Vega10_State { + UCHAR ucSocClockIndexHigh; + UCHAR ucSocClockIndexLow; + UCHAR ucGfxClockIndexHigh; + UCHAR ucGfxClockIndexLow; + UCHAR ucMemClockIndexHigh; + UCHAR ucMemClockIndexLow; + USHORT usClassification; + ULONG ulCapsAndSettings; + USHORT usClassification2; +} ATOM_Vega10_State; + +typedef struct _ATOM_Vega10_State_Array { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_State states[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_State_Array; + +typedef struct _ATOM_Vega10_CLK_Dependency_Record { + ULONG ulClk; /* Frequency of Clock */ + UCHAR ucVddInd; /* Base voltage */ +} ATOM_Vega10_CLK_Dependency_Record; + +typedef struct _ATOM_Vega10_GFXCLK_Dependency_Record { + ULONG ulClk; /* Clock Frequency */ + UCHAR ucVddInd; /* SOC_VDD index */ + USHORT usCKSVOffsetandDisable; /* Bits 0~30: Voltage offset for CKS, Bit 31: Disable/enable for the GFXCLK level. */ + USHORT usAVFSOffset; /* AVFS Voltage offset */ +} ATOM_Vega10_GFXCLK_Dependency_Record; + +typedef struct _ATOM_Vega10_MCLK_Dependency_Record { + ULONG ulMemClk; /* Clock Frequency */ + UCHAR ucVddInd; /* SOC_VDD index */ + UCHAR ucVddMemInd; /* MEM_VDD - only non zero for MCLK record */ + UCHAR ucVddciInd; /* VDDCI = only non zero for MCLK record */ +} ATOM_Vega10_MCLK_Dependency_Record; + +typedef struct _ATOM_Vega10_GFXCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_GFXCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. 
*/ +} ATOM_Vega10_GFXCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_MCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_MCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_MCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_SOCCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_SOCCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_DCEFCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_DCEFCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_PIXCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_PIXCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_DISPCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries.*/ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_DISPCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_PHYCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Vega10_CLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_PHYCLK_Dependency_Table; + +typedef struct _ATOM_Vega10_MM_Dependency_Record { + UCHAR ucVddcInd; /* SOC_VDD voltage */ + ULONG ulDClk; /* UVD D-clock */ + ULONG ulVClk; /* UVD V-clock */ + ULONG ulEClk; /* VCE clock */ + ULONG ulPSPClk; /* PSP clock */ +} ATOM_Vega10_MM_Dependency_Record; + +typedef struct _ATOM_Vega10_MM_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries */ + ATOM_Vega10_MM_Dependency_Record entries[1]; /* Dynamically allocate entries */ +} ATOM_Vega10_MM_Dependency_Table; + +typedef struct _ATOM_Vega10_PCIE_Record { + ULONG ulLCLK; /* LClock */ + UCHAR ucPCIEGenSpeed; /* PCIE Speed */ + UCHAR ucPCIELaneWidth; /* PCIE Lane Width */ +} ATOM_Vega10_PCIE_Record; + +typedef struct _ATOM_Vega10_PCIE_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries */ + ATOM_Vega10_PCIE_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Vega10_PCIE_Table; + +typedef struct _ATOM_Vega10_Voltage_Lookup_Record { + USHORT usVdd; /* Base voltage */ +} ATOM_Vega10_Voltage_Lookup_Record; + +typedef struct _ATOM_Vega10_Voltage_Lookup_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries */ + ATOM_Vega10_Voltage_Lookup_Record entries[1]; /* Dynamically allocate entries */ +} ATOM_Vega10_Voltage_Lookup_Table; + +typedef struct _ATOM_Vega10_Fan_Table { + UCHAR ucRevId; /* Change this if the table format changes or version changes so that the other fields are not the same. */ + USHORT usFanOutputSensitivity; /* Sensitivity of fan reaction to temepature changes. */ + USHORT usFanRPMMax; /* The default value in RPM. */ + USHORT usThrottlingRPM; + USHORT usFanAcousticLimit; /* Minimum Fan Controller Frequency Acoustic Limit. */ + USHORT usTargetTemperature; /* The default ideal temperature in Celcius. */ + USHORT usMinimumPWMLimit; /* The minimum PWM that the advanced fan controller can set. */ + USHORT usTargetGfxClk; /* The ideal Fan Controller GFXCLK Frequency Acoustic Limit. 
*/ + USHORT usFanGainEdge; + USHORT usFanGainHotspot; + USHORT usFanGainLiquid; + USHORT usFanGainVrVddc; + USHORT usFanGainVrMvdd; + USHORT usFanGainPlx; + USHORT usFanGainHbm; + UCHAR ucEnableZeroRPM; + USHORT usFanStopTemperature; + USHORT usFanStartTemperature; +} ATOM_Vega10_Fan_Table; + +typedef struct _ATOM_Vega10_Thermal_Controller { + UCHAR ucRevId; + UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/ + UCHAR ucI2cLine; /* as interpreted by DAL I2C */ + UCHAR ucI2cAddress; + UCHAR ucFanParameters; /* Fan Control Parameters. */ + UCHAR ucFanMinRPM; /* Fan Minimum RPM (hundreds) -- for display purposes only.*/ + UCHAR ucFanMaxRPM; /* Fan Maximum RPM (hundreds) -- for display purposes only.*/ + UCHAR ucFlags; /* to be defined */ +} ATOM_Vega10_Thermal_Controller; + +typedef struct _ATOM_Vega10_VCE_State_Record +{ + UCHAR ucVCEClockIndex; /*index into usVCEDependencyTableOffset of 'ATOM_Vega10_MM_Dependency_Table' type */ + UCHAR ucFlag; /* 2 bits indicates memory p-states */ + UCHAR ucSCLKIndex; /* index into ATOM_Vega10_SCLK_Dependency_Table */ + UCHAR ucMCLKIndex; /* index into ATOM_Vega10_MCLK_Dependency_Table */ +} ATOM_Vega10_VCE_State_Record; + +typedef struct _ATOM_Vega10_VCE_State_Table +{ + UCHAR ucRevId; + UCHAR ucNumEntries; + ATOM_Vega10_VCE_State_Record entries[1]; +} ATOM_Vega10_VCE_State_Table; + +typedef struct _ATOM_Vega10_PowerTune_Table { + UCHAR ucRevId; + USHORT usSocketPowerLimit; + USHORT usBatteryPowerLimit; + USHORT usSmallPowerLimit; + USHORT usTdcLimit; + USHORT usEdcLimit; + USHORT usSoftwareShutdownTemp; + USHORT usTemperatureLimitHotSpot; + USHORT usTemperatureLimitLiquid1; + USHORT usTemperatureLimitLiquid2; + USHORT usTemperatureLimitHBM; + USHORT usTemperatureLimitVrSoc; + USHORT usTemperatureLimitVrMem; + USHORT usTemperatureLimitPlx; + USHORT usLoadLineResistance; + UCHAR ucLiquid1_I2C_address; + UCHAR ucLiquid2_I2C_address; + UCHAR ucVr_I2C_address; + UCHAR ucPlx_I2C_address; + UCHAR ucLiquid_I2C_LineSCL; + UCHAR ucLiquid_I2C_LineSDA; + UCHAR ucVr_I2C_LineSCL; + UCHAR ucVr_I2C_LineSDA; + UCHAR ucPlx_I2C_LineSCL; + UCHAR ucPlx_I2C_LineSDA; + USHORT usTemperatureLimitTedge; +} ATOM_Vega10_PowerTune_Table; + +typedef struct _ATOM_Vega10_Hard_Limit_Record { + ULONG ulSOCCLKLimit; + ULONG ulGFXCLKLimit; + ULONG ulMCLKLimit; + USHORT usVddcLimit; + USHORT usVddciLimit; + USHORT usVddMemLimit; +} ATOM_Vega10_Hard_Limit_Record; + +typedef struct _ATOM_Vega10_Hard_Limit_Table +{ + UCHAR ucRevId; + UCHAR ucNumEntries; + ATOM_Vega10_Hard_Limit_Record entries[1]; +} ATOM_Vega10_Hard_Limit_Table; + +typedef struct _Vega10_PPTable_Generic_SubTable_Header +{ + UCHAR ucRevId; +} Vega10_PPTable_Generic_SubTable_Header; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c new file mode 100644 index 0000000..518634f --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c @@ -0,0 +1,1056 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/fb.h> + +#include "vega10_processpptables.h" +#include "ppatomfwctrl.h" +#include "atomfirmware.h" +#include "pp_debug.h" +#include "cgs_common.h" +#include "vega10_pptable.h" + +static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, + enum phm_platform_caps cap) +{ + if (enable) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, cap); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, cap); +} + +static const void *get_powerplay_table(struct pp_hwmgr *hwmgr) +{ + int index = GetIndexIntoMasterDataTable(powerplayinfo); + + u16 size; + u8 frev, crev; + const void *table_address = hwmgr->soft_pp_table; + + if (!table_address) { + table_address = (ATOM_Vega10_POWERPLAYTABLE *) + cgs_atom_get_data_table(hwmgr->device, index, + &size, &frev, &crev); + + hwmgr->soft_pp_table = table_address; /*Cache the result in RAM.*/ + } + + return table_address; +} + +static int check_powerplay_tables( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + const ATOM_Vega10_State_Array *state_arrays; + + state_arrays = (ATOM_Vega10_State_Array *)(((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usStateArrayOffset)); + + PP_ASSERT_WITH_CODE((powerplay_table->sHeader.format_revision >= + ATOM_Vega10_TABLE_REVISION_VEGA10), + "Unsupported PPTable format!", return -1); + PP_ASSERT_WITH_CODE(powerplay_table->usStateArrayOffset, + "State table is not set!", return -1); + PP_ASSERT_WITH_CODE(powerplay_table->sHeader.structuresize > 0, + "Invalid PowerPlay Table!", return -1); + PP_ASSERT_WITH_CODE(state_arrays->ucNumEntries > 0, + "Invalid PowerPlay Table!", return -1); + + return 0; +} + +static int set_platform_caps(struct pp_hwmgr *hwmgr, uint32_t powerplay_caps) +{ + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_CAP_POWERPLAY), + PHM_PlatformCaps_PowerPlaySupport); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_CAP_SBIOSPOWERSOURCE), + PHM_PlatformCaps_BiosPowerSourceControl); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_CAP_HARDWAREDC), + PHM_PlatformCaps_AutomaticDCTransition); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_CAP_BACO), + PHM_PlatformCaps_BACO); + + set_hw_cap( + hwmgr, + 0 != (powerplay_caps & ATOM_VEGA10_PP_PLATFORM_COMBINE_PCC_WITH_THERMAL_SIGNAL), + 
PHM_PlatformCaps_CombinePCCWithThermalSignal); + + return 0; +} + +static int init_thermal_controller( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + const ATOM_Vega10_Thermal_Controller *thermal_controller; + const ATOM_Vega10_Fan_Table *fan_table; + + thermal_controller = (ATOM_Vega10_Thermal_Controller *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usThermalControllerOffset)); + + PP_ASSERT_WITH_CODE((powerplay_table->usThermalControllerOffset != 0), + "Thermal controller table not set!", return -1); + + hwmgr->thermal_controller.ucType = thermal_controller->ucType; + hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine; + hwmgr->thermal_controller.ucI2cAddress = thermal_controller->ucI2cAddress; + + hwmgr->thermal_controller.fanInfo.bNoFan = + (0 != (thermal_controller->ucFanParameters & + ATOM_VEGA10_PP_FANPARAMETERS_NOFAN)); + + hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution = + thermal_controller->ucFanParameters & + ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + + hwmgr->thermal_controller.fanInfo.ulMinRPM = + thermal_controller->ucFanMinRPM * 100UL; + hwmgr->thermal_controller.fanInfo.ulMaxRPM = + thermal_controller->ucFanMaxRPM * 100UL; + + set_hw_cap( + hwmgr, + ATOM_VEGA10_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType, + PHM_PlatformCaps_ThermalController); + + if (!powerplay_table->usFanTableOffset) + return 0; + + fan_table = (const ATOM_Vega10_Fan_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usFanTableOffset)); + + PP_ASSERT_WITH_CODE((fan_table->ucRevId >= 8), + "Invalid Input Fan Table!", return -1); + + hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay + = 100000; + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl); + + hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity = + le16_to_cpu(fan_table->usFanOutputSensitivity); + hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM = + le16_to_cpu(fan_table->usFanRPMMax); + hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = + le16_to_cpu(fan_table->usThrottlingRPM); + hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit = + le32_to_cpu((uint32_t)(fan_table->usFanAcousticLimit)); + hwmgr->thermal_controller.advanceFanControlParameters.usTMax = + le16_to_cpu(fan_table->usTargetTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin = + le16_to_cpu(fan_table->usMinimumPWMLimit); + hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk = + le32_to_cpu((uint32_t)(fan_table->usTargetGfxClk)); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge = + le16_to_cpu(fan_table->usFanGainEdge); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot = + le16_to_cpu(fan_table->usFanGainHotspot); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid = + le16_to_cpu(fan_table->usFanGainLiquid); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc = + le16_to_cpu(fan_table->usFanGainVrVddc); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd = + le16_to_cpu(fan_table->usFanGainVrMvdd); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx = + le16_to_cpu(fan_table->usFanGainPlx); + hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm = + le16_to_cpu(fan_table->usFanGainHbm); + + 
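+	/* Zero-RPM (fan-stop) parameters taken from the BIOS fan table. */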
hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM = + fan_table->ucEnableZeroRPM; + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature = + le16_to_cpu(fan_table->usFanStopTemperature); + hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature = + le16_to_cpu(fan_table->usFanStartTemperature); + + return 0; +} + +static int init_over_drive_limits( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + hwmgr->platform_descriptor.overdriveLimit.engineClock = + le32_to_cpu(powerplay_table->ulMaxODEngineClock); + hwmgr->platform_descriptor.overdriveLimit.memoryClock = + le32_to_cpu(powerplay_table->ulMaxODMemoryClock); + + hwmgr->platform_descriptor.minOverdriveVDDC = 0; + hwmgr->platform_descriptor.maxOverdriveVDDC = 0; + hwmgr->platform_descriptor.overdriveVDDCStep = 0; + + if (hwmgr->platform_descriptor.overdriveLimit.engineClock > 0 && + hwmgr->platform_descriptor.overdriveLimit.memoryClock > 0) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ACOverdriveSupport); + } + + return 0; +} + +static int get_mm_clock_voltage_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_mm_clock_voltage_dependency_table **vega10_mm_table, + const ATOM_Vega10_MM_Dependency_Table *mm_dependency_table) +{ + uint32_t table_size, i; + const ATOM_Vega10_MM_Dependency_Record *mm_dependency_record; + phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table; + + PP_ASSERT_WITH_CODE((mm_dependency_table->ucNumEntries != 0), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) * + mm_dependency_table->ucNumEntries; + mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!mm_table) + return -ENOMEM; + + mm_table->count = mm_dependency_table->ucNumEntries; + + for (i = 0; i < mm_dependency_table->ucNumEntries; i++) { + mm_dependency_record = &mm_dependency_table->entries[i]; + mm_table->entries[i].vddcInd = mm_dependency_record->ucVddcInd; + mm_table->entries[i].samclock = + le32_to_cpu(mm_dependency_record->ulPSPClk); + mm_table->entries[i].eclk = le32_to_cpu(mm_dependency_record->ulEClk); + mm_table->entries[i].vclk = le32_to_cpu(mm_dependency_record->ulVClk); + mm_table->entries[i].dclk = le32_to_cpu(mm_dependency_record->ulDClk); + } + + *vega10_mm_table = mm_table; + + return 0; +} + +static int get_tdp_table( + struct pp_hwmgr *hwmgr, + struct phm_tdp_table **info_tdp_table, + const Vega10_PPTable_Generic_SubTable_Header *table) +{ + uint32_t table_size; + struct phm_tdp_table *tdp_table; + + const ATOM_Vega10_PowerTune_Table *power_tune_table = + (ATOM_Vega10_PowerTune_Table *)table; + + table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table); + hwmgr->dyn_state.cac_dtp_table = (struct phm_cac_tdp_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!hwmgr->dyn_state.cac_dtp_table) + return -ENOMEM; + + table_size = sizeof(uint32_t) + sizeof(struct phm_tdp_table); + tdp_table = kzalloc(table_size, GFP_KERNEL); + + if (!tdp_table) { + kfree(hwmgr->dyn_state.cac_dtp_table); + hwmgr->dyn_state.cac_dtp_table = NULL; + return -ENOMEM; + } + + tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit); + tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit); + tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit); + tdp_table->usSoftwareShutdownTemp = + le16_to_cpu(power_tune_table->usSoftwareShutdownTemp); + 
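+	/* Per-sensor temperature limits reported by the PowerTune table. */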
tdp_table->usTemperatureLimitTedge = + le16_to_cpu(power_tune_table->usTemperatureLimitTedge); + tdp_table->usTemperatureLimitHotspot = + le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot); + tdp_table->usTemperatureLimitLiquid1 = + le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1); + tdp_table->usTemperatureLimitLiquid2 = + le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2); + tdp_table->usTemperatureLimitHBM = + le16_to_cpu(power_tune_table->usTemperatureLimitHBM); + tdp_table->usTemperatureLimitVrVddc = + le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc); + tdp_table->usTemperatureLimitVrMvdd = + le16_to_cpu(power_tune_table->usTemperatureLimitVrMem); + tdp_table->usTemperatureLimitPlx = + le16_to_cpu(power_tune_table->usTemperatureLimitPlx); + tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address; + tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address; + tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL; + tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA; + tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address; + tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL; + tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA; + tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address; + tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL; + tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA; + + hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance; + + *info_tdp_table = tdp_table; + + return 0; +} + +static int get_socclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table **pp_vega10_clk_dep_table, + const ATOM_Vega10_SOCCLK_Dependency_Table *clk_dep_table) +{ + uint32_t table_size, i; + phm_ppt_v1_clock_voltage_dependency_table *clk_table; + + PP_ASSERT_WITH_CODE(clk_dep_table->ucNumEntries, + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * + clk_dep_table->ucNumEntries; + + clk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!clk_table) + return -ENOMEM; + + clk_table->count = (uint32_t)clk_dep_table->ucNumEntries; + + for (i = 0; i < clk_dep_table->ucNumEntries; i++) { + clk_table->entries[i].vddInd = + clk_dep_table->entries[i].ucVddInd; + clk_table->entries[i].clk = + le32_to_cpu(clk_dep_table->entries[i].ulClk); + } + + *pp_vega10_clk_dep_table = clk_table; + + return 0; +} + +static int get_mclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_clock_voltage_dependency_table **pp_vega10_mclk_dep_table, + const ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table) +{ + uint32_t table_size, i; + phm_ppt_v1_clock_voltage_dependency_table *mclk_table; + + PP_ASSERT_WITH_CODE(mclk_dep_table->ucNumEntries, + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * + mclk_dep_table->ucNumEntries; + + mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!mclk_table) + return -ENOMEM; + + mclk_table->count = (uint32_t)mclk_dep_table->ucNumEntries; + + for (i = 0; i < mclk_dep_table->ucNumEntries; i++) { + mclk_table->entries[i].vddInd = + mclk_dep_table->entries[i].ucVddInd; + mclk_table->entries[i].vddciInd = + mclk_dep_table->entries[i].ucVddciInd; + 
mclk_table->entries[i].mvddInd = + mclk_dep_table->entries[i].ucVddMemInd; + mclk_table->entries[i].clk = + le32_to_cpu(mclk_dep_table->entries[i].ulMemClk); + } + + *pp_vega10_mclk_dep_table = mclk_table; + + return 0; +} + +static int get_gfxclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table + **pp_vega10_clk_dep_table, + const ATOM_Vega10_GFXCLK_Dependency_Table *clk_dep_table) +{ + uint32_t table_size, i; + struct phm_ppt_v1_clock_voltage_dependency_table + *clk_table; + + PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * + clk_dep_table->ucNumEntries; + + clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!clk_table) + return -ENOMEM; + + clk_table->count = clk_dep_table->ucNumEntries; + + for (i = 0; i < clk_table->count; i++) { + clk_table->entries[i].vddInd = + clk_dep_table->entries[i].ucVddInd; + clk_table->entries[i].clk = + le32_to_cpu(clk_dep_table->entries[i].ulClk); + clk_table->entries[i].cks_enable = + (((clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x8000) + >> 15) == 0) ? 1 : 0; + clk_table->entries[i].cks_voffset = + (clk_dep_table->entries[i].usCKSVOffsetandDisable & 0x7F); + clk_table->entries[i].sclk_offset = + clk_dep_table->entries[i].usAVFSOffset; + } + + *pp_vega10_clk_dep_table = clk_table; + + return 0; +} + +static int get_dcefclk_voltage_dependency_table( + struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_clock_voltage_dependency_table + **pp_vega10_clk_dep_table, + const ATOM_Vega10_DCEFCLK_Dependency_Table *clk_dep_table) +{ + uint32_t table_size, i; + struct phm_ppt_v1_clock_voltage_dependency_table + *clk_table; + + PP_ASSERT_WITH_CODE((clk_dep_table->ucNumEntries != 0), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * + clk_dep_table->ucNumEntries; + + clk_table = (struct phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!clk_table) + return -ENOMEM; + + clk_table->count = clk_dep_table->ucNumEntries; + + for (i = 0; i < clk_table->count; i++) { + clk_table->entries[i].vddInd = + clk_dep_table->entries[i].ucVddInd; + clk_table->entries[i].clk = + le32_to_cpu(clk_dep_table->entries[i].ulClk); + } + + *pp_vega10_clk_dep_table = clk_table; + + return 0; +} + +static int get_pcie_table(struct pp_hwmgr *hwmgr, + struct phm_ppt_v1_pcie_table **vega10_pcie_table, + const Vega10_PPTable_Generic_SubTable_Header *table) +{ + uint32_t table_size, i, pcie_count; + struct phm_ppt_v1_pcie_table *pcie_table; + struct phm_ppt_v2_information *table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + const ATOM_Vega10_PCIE_Table *atom_pcie_table = + (ATOM_Vega10_PCIE_Table *)table; + + PP_ASSERT_WITH_CODE(atom_pcie_table->ucNumEntries, + "Invalid PowerPlay Table!", + return 0); + + table_size = sizeof(uint32_t) + + sizeof(struct phm_ppt_v1_pcie_record) * + atom_pcie_table->ucNumEntries; + + pcie_table = (struct phm_ppt_v1_pcie_table *) + kzalloc(table_size, GFP_KERNEL); + + if (!pcie_table) + return -ENOMEM; + + pcie_count = table_info->vdd_dep_on_sclk->count; + if (atom_pcie_table->ucNumEntries <= pcie_count) + pcie_count = atom_pcie_table->ucNumEntries; + else + pr_info("Number of PCIe entries exceeds the number of" + " GFXCLK Dpm Levels!" 
+ " Disregarding the excess entries...\n"); + + pcie_table->count = pcie_count; + + for (i = 0; i < pcie_count; i++) { + pcie_table->entries[i].gen_speed = + atom_pcie_table->entries[i].ucPCIEGenSpeed; + pcie_table->entries[i].lane_width = + atom_pcie_table->entries[i].ucPCIELaneWidth; + pcie_table->entries[i].pcie_sclk = + atom_pcie_table->entries[i].ulLCLK; + } + + *vega10_pcie_table = pcie_table; + + return 0; +} + +static int get_hard_limits( + struct pp_hwmgr *hwmgr, + struct phm_clock_and_voltage_limits *limits, + const ATOM_Vega10_Hard_Limit_Table *limit_table) +{ + PP_ASSERT_WITH_CODE(limit_table->ucNumEntries, + "Invalid PowerPlay Table!", return -1); + + /* currently we always take entries[0] parameters */ + limits->sclk = le32_to_cpu(limit_table->entries[0].ulSOCCLKLimit); + limits->mclk = le32_to_cpu(limit_table->entries[0].ulMCLKLimit); + limits->gfxclk = le32_to_cpu(limit_table->entries[0].ulGFXCLKLimit); + limits->vddc = le16_to_cpu(limit_table->entries[0].usVddcLimit); + limits->vddci = le16_to_cpu(limit_table->entries[0].usVddciLimit); + limits->vddmem = le16_to_cpu(limit_table->entries[0].usVddMemLimit); + + return 0; +} + +static int get_valid_clk( + struct pp_hwmgr *hwmgr, + struct phm_clock_array **clk_table, + const phm_ppt_v1_clock_voltage_dependency_table *clk_volt_pp_table) +{ + uint32_t table_size, i; + struct phm_clock_array *table; + + PP_ASSERT_WITH_CODE(clk_volt_pp_table->count, + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + + sizeof(uint32_t) * clk_volt_pp_table->count; + + table = kzalloc(table_size, GFP_KERNEL); + + if (!table) + return -ENOMEM; + + table->count = (uint32_t)clk_volt_pp_table->count; + + for (i = 0; i < table->count; i++) + table->values[i] = (uint32_t)clk_volt_pp_table->entries[i].clk; + + *clk_table = table; + + return 0; +} + +static int init_powerplay_extended_tables( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + int result = 0; + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + const ATOM_Vega10_MM_Dependency_Table *mm_dependency_table = + (const ATOM_Vega10_MM_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usMMDependencyTableOffset)); + const Vega10_PPTable_Generic_SubTable_Header *power_tune_table = + (const Vega10_PPTable_Generic_SubTable_Header *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPowerTuneTableOffset)); + const ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table = + (const ATOM_Vega10_SOCCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset)); + const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table = + (const ATOM_Vega10_GFXCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset)); + const ATOM_Vega10_DCEFCLK_Dependency_Table *dcefclk_dep_table = + (const ATOM_Vega10_DCEFCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usDcefclkDependencyTableOffset)); + const ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table = + (const ATOM_Vega10_MCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); + const ATOM_Vega10_Hard_Limit_Table *hard_limits = + (const ATOM_Vega10_Hard_Limit_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usHardLimitTableOffset)); + 
const Vega10_PPTable_Generic_SubTable_Header *pcie_table = + (const Vega10_PPTable_Generic_SubTable_Header *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPCIETableOffset)); + const ATOM_Vega10_PIXCLK_Dependency_Table *pixclk_dep_table = + (const ATOM_Vega10_PIXCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPixclkDependencyTableOffset)); + const ATOM_Vega10_PHYCLK_Dependency_Table *phyclk_dep_table = + (const ATOM_Vega10_PHYCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usPhyClkDependencyTableOffset)); + const ATOM_Vega10_DISPCLK_Dependency_Table *dispclk_dep_table = + (const ATOM_Vega10_DISPCLK_Dependency_Table *) + (((unsigned long) powerplay_table) + + le16_to_cpu(powerplay_table->usDispClkDependencyTableOffset)); + + pp_table_info->vdd_dep_on_socclk = NULL; + pp_table_info->vdd_dep_on_sclk = NULL; + pp_table_info->vdd_dep_on_mclk = NULL; + pp_table_info->vdd_dep_on_dcefclk = NULL; + pp_table_info->mm_dep_table = NULL; + pp_table_info->tdp_table = NULL; + pp_table_info->vdd_dep_on_pixclk = NULL; + pp_table_info->vdd_dep_on_phyclk = NULL; + pp_table_info->vdd_dep_on_dispclk = NULL; + + if (powerplay_table->usMMDependencyTableOffset) + result = get_mm_clock_voltage_table(hwmgr, + &pp_table_info->mm_dep_table, + mm_dependency_table); + + if (!result && powerplay_table->usPowerTuneTableOffset) + result = get_tdp_table(hwmgr, + &pp_table_info->tdp_table, + power_tune_table); + + if (!result && powerplay_table->usSocclkDependencyTableOffset) + result = get_socclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_socclk, + socclk_dep_table); + + if (!result && powerplay_table->usGfxclkDependencyTableOffset) + result = get_gfxclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_sclk, + gfxclk_dep_table); + + if (!result && powerplay_table->usPixclkDependencyTableOffset) + result = get_dcefclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_pixclk, + (const ATOM_Vega10_DCEFCLK_Dependency_Table*) + pixclk_dep_table); + + if (!result && powerplay_table->usPhyClkDependencyTableOffset) + result = get_dcefclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_phyclk, + (const ATOM_Vega10_DCEFCLK_Dependency_Table *) + phyclk_dep_table); + + if (!result && powerplay_table->usDispClkDependencyTableOffset) + result = get_dcefclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_dispclk, + (const ATOM_Vega10_DCEFCLK_Dependency_Table *) + dispclk_dep_table); + + if (!result && powerplay_table->usDcefclkDependencyTableOffset) + result = get_dcefclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_dcefclk, + dcefclk_dep_table); + + if (!result && powerplay_table->usMclkDependencyTableOffset) + result = get_mclk_voltage_dependency_table(hwmgr, + &pp_table_info->vdd_dep_on_mclk, + mclk_dep_table); + + if (!result && powerplay_table->usPCIETableOffset) + result = get_pcie_table(hwmgr, + &pp_table_info->pcie_table, + pcie_table); + + if (!result && powerplay_table->usHardLimitTableOffset) + result = get_hard_limits(hwmgr, + &pp_table_info->max_clock_voltage_on_dc, + hard_limits); + + hwmgr->dyn_state.max_clock_voltage_on_dc.sclk = + pp_table_info->max_clock_voltage_on_dc.sclk; + hwmgr->dyn_state.max_clock_voltage_on_dc.mclk = + pp_table_info->max_clock_voltage_on_dc.mclk; + hwmgr->dyn_state.max_clock_voltage_on_dc.vddc = + pp_table_info->max_clock_voltage_on_dc.vddc; + hwmgr->dyn_state.max_clock_voltage_on_dc.vddci = + 
pp_table_info->max_clock_voltage_on_dc.vddci; + + if (!result && + pp_table_info->vdd_dep_on_socclk && + pp_table_info->vdd_dep_on_socclk->count) + result = get_valid_clk(hwmgr, + &pp_table_info->valid_socclk_values, + pp_table_info->vdd_dep_on_socclk); + + if (!result && + pp_table_info->vdd_dep_on_sclk && + pp_table_info->vdd_dep_on_sclk->count) + result = get_valid_clk(hwmgr, + &pp_table_info->valid_sclk_values, + pp_table_info->vdd_dep_on_sclk); + + if (!result && + pp_table_info->vdd_dep_on_dcefclk && + pp_table_info->vdd_dep_on_dcefclk->count) + result = get_valid_clk(hwmgr, + &pp_table_info->valid_dcefclk_values, + pp_table_info->vdd_dep_on_dcefclk); + + if (!result && + pp_table_info->vdd_dep_on_mclk && + pp_table_info->vdd_dep_on_mclk->count) + result = get_valid_clk(hwmgr, + &pp_table_info->valid_mclk_values, + pp_table_info->vdd_dep_on_mclk); + + return result; +} + +static int get_vddc_lookup_table( + struct pp_hwmgr *hwmgr, + phm_ppt_v1_voltage_lookup_table **lookup_table, + const ATOM_Vega10_Voltage_Lookup_Table *vddc_lookup_pp_tables, + uint32_t max_levels) +{ + uint32_t table_size, i; + phm_ppt_v1_voltage_lookup_table *table; + + PP_ASSERT_WITH_CODE((vddc_lookup_pp_tables->ucNumEntries != 0), + "Invalid SOC_VDDD Lookup Table!", return 1); + + table_size = sizeof(uint32_t) + + sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels; + + table = (phm_ppt_v1_voltage_lookup_table *) + kzalloc(table_size, GFP_KERNEL); + + if (NULL == table) + return -ENOMEM; + + table->count = vddc_lookup_pp_tables->ucNumEntries; + + for (i = 0; i < vddc_lookup_pp_tables->ucNumEntries; i++) + table->entries[i].us_vdd = + le16_to_cpu(vddc_lookup_pp_tables->entries[i].usVdd); + + *lookup_table = table; + + return 0; +} + +static int init_dpm_2_parameters( + struct pp_hwmgr *hwmgr, + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table) +{ + int result = 0; + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + uint32_t disable_power_control = 0; + + pp_table_info->us_ulv_voltage_offset = + le16_to_cpu(powerplay_table->usUlvVoltageOffset); + + pp_table_info->us_ulv_smnclk_did = + le16_to_cpu(powerplay_table->usUlvSmnclkDid); + pp_table_info->us_ulv_mp1clk_did = + le16_to_cpu(powerplay_table->usUlvMp1clkDid); + pp_table_info->us_ulv_gfxclk_bypass = + le16_to_cpu(powerplay_table->usUlvGfxclkBypass); + pp_table_info->us_gfxclk_slew_rate = + le16_to_cpu(powerplay_table->usGfxclkSlewRate); + pp_table_info->uc_gfx_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucGfxVoltageMode); + pp_table_info->uc_soc_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucSocVoltageMode); + pp_table_info->uc_uclk_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucUclkVoltageMode); + pp_table_info->uc_uvd_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucUvdVoltageMode); + pp_table_info->uc_vce_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucVceVoltageMode); + pp_table_info->uc_mp0_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucMp0VoltageMode); + pp_table_info->uc_dcef_dpm_voltage_mode = + le16_to_cpu(powerplay_table->ucDcefVoltageMode); + + pp_table_info->ppm_parameter_table = NULL; + pp_table_info->vddc_lookup_table = NULL; + pp_table_info->vddmem_lookup_table = NULL; + pp_table_info->vddci_lookup_table = NULL; + + /* TDP limits */ + hwmgr->platform_descriptor.TDPODLimit = + le16_to_cpu(powerplay_table->usPowerControlLimit); + hwmgr->platform_descriptor.TDPAdjustment = 0; + hwmgr->platform_descriptor.VidAdjustment = 0; + 
hwmgr->platform_descriptor.VidAdjustmentPolarity = 0; + hwmgr->platform_descriptor.VidMinLimit = 0; + hwmgr->platform_descriptor.VidMaxLimit = 1500000; + hwmgr->platform_descriptor.VidStep = 6250; + + disable_power_control = 0; + if (!disable_power_control) { + /* enable TDP overdrive (PowerControl) feature as well if supported */ + if (hwmgr->platform_descriptor.TDPODLimit) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerControl); + } + + if (powerplay_table->usVddcLookupTableOffset) { + const ATOM_Vega10_Voltage_Lookup_Table *vddc_table = + (ATOM_Vega10_Voltage_Lookup_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usVddcLookupTableOffset)); + result = get_vddc_lookup_table(hwmgr, + &pp_table_info->vddc_lookup_table, vddc_table, 16); + } + + if (powerplay_table->usVddmemLookupTableOffset) { + const ATOM_Vega10_Voltage_Lookup_Table *vdd_mem_table = + (ATOM_Vega10_Voltage_Lookup_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usVddmemLookupTableOffset)); + result = get_vddc_lookup_table(hwmgr, + &pp_table_info->vddmem_lookup_table, vdd_mem_table, 16); + } + + if (powerplay_table->usVddciLookupTableOffset) { + const ATOM_Vega10_Voltage_Lookup_Table *vddci_table = + (ATOM_Vega10_Voltage_Lookup_Table *) + (((unsigned long)powerplay_table) + + le16_to_cpu(powerplay_table->usVddciLookupTableOffset)); + result = get_vddc_lookup_table(hwmgr, + &pp_table_info->vddci_lookup_table, vddci_table, 16); + } + + return result; +} + +int vega10_pp_tables_initialize(struct pp_hwmgr *hwmgr) +{ + int result = 0; + const ATOM_Vega10_POWERPLAYTABLE *powerplay_table; + + hwmgr->pptable = kzalloc(sizeof(struct phm_ppt_v2_information), GFP_KERNEL); + + PP_ASSERT_WITH_CODE((NULL != hwmgr->pptable), + "Failed to allocate hwmgr->pptable!", return -ENOMEM); + + powerplay_table = get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE((NULL != powerplay_table), + "Missing PowerPlay Table!", return -1); + + result = check_powerplay_tables(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "check_powerplay_tables failed", return result); + + result = set_platform_caps(hwmgr, + le32_to_cpu(powerplay_table->ulPlatformCaps)); + + PP_ASSERT_WITH_CODE((result == 0), + "set_platform_caps failed", return result); + + result = init_thermal_controller(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_thermal_controller failed", return result); + + result = init_over_drive_limits(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_over_drive_limits failed", return result); + + result = init_powerplay_extended_tables(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_powerplay_extended_tables failed", return result); + + result = init_dpm_2_parameters(hwmgr, powerplay_table); + + PP_ASSERT_WITH_CODE((result == 0), + "init_dpm_2_parameters failed", return result); + + return result; +} + +static int vega10_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) +{ + int result = 0; + struct phm_ppt_v2_information *pp_table_info = + (struct phm_ppt_v2_information *)(hwmgr->pptable); + + kfree(pp_table_info->vdd_dep_on_sclk); + pp_table_info->vdd_dep_on_sclk = NULL; + + kfree(pp_table_info->vdd_dep_on_mclk); + pp_table_info->vdd_dep_on_mclk = NULL; + + kfree(pp_table_info->valid_mclk_values); + pp_table_info->valid_mclk_values = NULL; + + kfree(pp_table_info->valid_sclk_values); + pp_table_info->valid_sclk_values = NULL; + + kfree(pp_table_info->vddc_lookup_table); + 
pp_table_info->vddc_lookup_table = NULL; + + kfree(pp_table_info->vddmem_lookup_table); + pp_table_info->vddmem_lookup_table = NULL; + + kfree(pp_table_info->vddci_lookup_table); + pp_table_info->vddci_lookup_table = NULL; + + kfree(pp_table_info->ppm_parameter_table); + pp_table_info->ppm_parameter_table = NULL; + + kfree(pp_table_info->mm_dep_table); + pp_table_info->mm_dep_table = NULL; + + kfree(pp_table_info->cac_dtp_table); + pp_table_info->cac_dtp_table = NULL; + + kfree(hwmgr->dyn_state.cac_dtp_table); + hwmgr->dyn_state.cac_dtp_table = NULL; + + kfree(pp_table_info->tdp_table); + pp_table_info->tdp_table = NULL; + + kfree(hwmgr->pptable); + hwmgr->pptable = NULL; + + return result; +} + +const struct pp_table_func vega10_pptable_funcs = { + .pptable_init = vega10_pp_tables_initialize, + .pptable_fini = vega10_pp_tables_uninitialize, +}; + +int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr) +{ + const ATOM_Vega10_State_Array *state_arrays; + const ATOM_Vega10_POWERPLAYTABLE *pp_table = get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE((NULL != pp_table), + "Missing PowerPlay Table!", return -1); + PP_ASSERT_WITH_CODE((pp_table->sHeader.format_revision >= + ATOM_Vega10_TABLE_REVISION_VEGA10), + "Incorrect PowerPlay table revision!", return -1); + + state_arrays = (ATOM_Vega10_State_Array *)(((unsigned long)pp_table) + + le16_to_cpu(pp_table->usStateArrayOffset)); + + return (uint32_t)(state_arrays->ucNumEntries); +} + +static uint32_t make_classification_flags(struct pp_hwmgr *hwmgr, + uint16_t classification, uint16_t classification2) +{ + uint32_t result = 0; + + if (classification & ATOM_PPLIB_CLASSIFICATION_BOOT) + result |= PP_StateClassificationFlag_Boot; + + if (classification & ATOM_PPLIB_CLASSIFICATION_THERMAL) + result |= PP_StateClassificationFlag_Thermal; + + if (classification & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) + result |= PP_StateClassificationFlag_LimitedPowerSource; + + if (classification & ATOM_PPLIB_CLASSIFICATION_REST) + result |= PP_StateClassificationFlag_Rest; + + if (classification & ATOM_PPLIB_CLASSIFICATION_FORCED) + result |= PP_StateClassificationFlag_Forced; + + if (classification & ATOM_PPLIB_CLASSIFICATION_ACPI) + result |= PP_StateClassificationFlag_ACPI; + + if (classification2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) + result |= PP_StateClassificationFlag_LimitedPowerSource_2; + + return result; +} + +int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, + uint32_t entry_index, struct pp_power_state *power_state, + int (*call_back_func)(struct pp_hwmgr *, void *, + struct pp_power_state *, void *, uint32_t)) +{ + int result = 0; + const ATOM_Vega10_State_Array *state_arrays; + const ATOM_Vega10_State *state_entry; + const ATOM_Vega10_POWERPLAYTABLE *pp_table = + get_powerplay_table(hwmgr); + + PP_ASSERT_WITH_CODE(pp_table, "Missing PowerPlay Table!", + return -1;); + power_state->classification.bios_index = entry_index; + + if (pp_table->sHeader.format_revision >= + ATOM_Vega10_TABLE_REVISION_VEGA10) { + state_arrays = (ATOM_Vega10_State_Array *) + (((unsigned long)pp_table) + + le16_to_cpu(pp_table->usStateArrayOffset)); + + PP_ASSERT_WITH_CODE(pp_table->usStateArrayOffset > 0, + "Invalid PowerPlay Table State Array Offset.", + return -1); + PP_ASSERT_WITH_CODE(state_arrays->ucNumEntries > 0, + "Invalid PowerPlay Table State Array.", + return -1); + PP_ASSERT_WITH_CODE((entry_index <= state_arrays->ucNumEntries), + "Invalid PowerPlay Table State Array Entry.", + return -1); + + state_entry = 
&(state_arrays->states[entry_index]); + + result = call_back_func(hwmgr, (void *)state_entry, power_state, + (void *)pp_table, + make_classification_flags(hwmgr, + le16_to_cpu(state_entry->usClassification), + le16_to_cpu(state_entry->usClassification2))); + } + + if (!result && (power_state->classification.flags & + PP_StateClassificationFlag_Boot)) + result = hwmgr->hwmgr_func->patch_boot_state(hwmgr, &(power_state->hardware)); + + return result; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h new file mode 100644 index 0000000..995d133 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h @@ -0,0 +1,34 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef VEGA10_PROCESSPPTABLES_H +#define VEGA10_PROCESSPPTABLES_H + +#include "hwmgr.h" + +extern const struct pp_table_func vega10_pptable_funcs; +extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr); +extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index, + struct pp_power_state *power_state, int (*call_back_func)(struct pp_hwmgr *, void *, + struct pp_power_state *, void *, uint32_t)); +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c new file mode 100644 index 0000000..f4d77b6 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c @@ -0,0 +1,761 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "vega10_thermal.h" +#include "vega10_hwmgr.h" +#include "vega10_smumgr.h" +#include "vega10_ppsmc.h" +#include "vega10_inc.h" +#include "pp_soc15.h" +#include "pp_debug.h" + +static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm) +{ + PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_GetCurrentRpm), + "Attempt to get current RPM from SMC Failed!", + return -1); + PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr, + current_rpm), + "Attempt to read current RPM from SMC Failed!", + return -1); + return 0; +} + +int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, + struct phm_fan_speed_info *fan_speed_info) +{ + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + fan_speed_info->supports_percent_read = true; + fan_speed_info->supports_percent_write = true; + fan_speed_info->min_percent = 0; + fan_speed_info->max_percent = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_FanSpeedInTableIsRPM) && + hwmgr->thermal_controller.fanInfo. + ucTachometerPulsesPerRevolution) { + fan_speed_info->supports_rpm_read = true; + fan_speed_info->supports_rpm_write = true; + fan_speed_info->min_rpm = + hwmgr->thermal_controller.fanInfo.ulMinRPM; + fan_speed_info->max_rpm = + hwmgr->thermal_controller.fanInfo.ulMaxRPM; + } else { + fan_speed_info->min_rpm = 0; + fan_speed_info->max_rpm = 0; + } + + return 0; +} + +int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t *speed) +{ + uint32_t current_rpm; + uint32_t percent = 0; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (vega10_get_current_rpm(hwmgr, &current_rpm)) + return -1; + + if (hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM != 0) + percent = current_rpm * 100 / + hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM; + + *speed = percent > 100 ? 100 : percent; + + return 0; +} + +int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + uint32_t tach_period; + uint32_t crystal_clock_freq; + int result = 0; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -1; + + if (data->smu_features[GNLD_FAN_CONTROL].supported) + result = vega10_get_current_rpm(hwmgr, speed); + else { + uint32_t reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); + tach_period = (cgs_read_register(hwmgr->device, + reg) & CG_TACH_STATUS__TACH_PERIOD_MASK) >> + CG_TACH_STATUS__TACH_PERIOD__SHIFT; + + if (tach_period == 0) + return -EINVAL; + + crystal_clock_freq = smu7_get_xclk(hwmgr); + + *speed = 60 * crystal_clock_freq * 10000 / tach_period; + } + + return result; +} + +/** +* Set Fan Speed Control to static mode, +* so that the user can decide what speed to use. +* @param hwmgr the address of the powerplay hardware manager. +* @param mode the fan control mode: 0 default, 1 by percent, 5 by RPM +* @exception Should always succeed. 
+*/ +int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode) +{ + uint32_t reg; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + + if (hwmgr->fan_ctrl_is_in_default_mode) { + hwmgr->fan_ctrl_default_mode = + (cgs_read_register(hwmgr->device, reg) & + CG_FDO_CTRL2__FDO_PWM_MODE_MASK) >> + CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT; + hwmgr->tmin = (cgs_read_register(hwmgr->device, reg) & + CG_FDO_CTRL2__TMIN_MASK) >> + CG_FDO_CTRL2__TMIN__SHIFT; + hwmgr->fan_ctrl_is_in_default_mode = false; + } + + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__TMIN_MASK) | + (0 << CG_FDO_CTRL2__TMIN__SHIFT)); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) | + (mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT)); + + return 0; +} + +/** +* Reset Fan Speed Control to default mode. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Should always succeed. +*/ +int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + + if (!hwmgr->fan_ctrl_is_in_default_mode) { + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK) | + (hwmgr->fan_ctrl_default_mode << + CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT)); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__TMIN_MASK) | + (hwmgr->tmin << CG_FDO_CTRL2__TMIN__SHIFT)); + hwmgr->fan_ctrl_is_in_default_mode = true; + } + + return 0; +} + +/** + * @fn vega10_enable_fan_control_feature + * @brief Enables the SMC Fan Control Feature. + * + * @param hwmgr - the address of the powerplay hardware manager. + * @return 0 on success. -1 otherwise. + */ +static int vega10_enable_fan_control_feature(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_FAN_CONTROL].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features( + hwmgr->smumgr, true, + data->smu_features[GNLD_FAN_CONTROL]. + smu_feature_bitmap), + "Attempt to Enable FAN CONTROL feature Failed!", + return -1); + data->smu_features[GNLD_FAN_CONTROL].enabled = true; + } + + return 0; +} + +static int vega10_disable_fan_control_feature(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_FAN_CONTROL].supported) { + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features( + hwmgr->smumgr, false, + data->smu_features[GNLD_FAN_CONTROL]. 
+ smu_feature_bitmap), + "Attempt to Disable FAN CONTROL feature Failed!", + return -1); + data->smu_features[GNLD_FAN_CONTROL].enabled = false; + } + + return 0; +} + +int vega10_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -1; + + PP_ASSERT_WITH_CODE(!vega10_enable_fan_control_feature(hwmgr), + "Attempt to Enable SMC FAN CONTROL Feature Failed!", + return -1); + + return 0; +} + + +int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return -1; + + if (data->smu_features[GNLD_FAN_CONTROL].supported) { + PP_ASSERT_WITH_CODE(!vega10_disable_fan_control_feature(hwmgr), + "Attempt to Disable SMC FAN CONTROL Feature Failed!", + return -1); + } + return 0; +} + +/** +* Set Fan Speed in percent. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the percentage value (0% - 100%) to be set. +* @exception Fails if the 100% setting appears to be 0. +*/ +int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t speed) +{ + uint32_t duty100; + uint32_t duty; + uint64_t tmp64; + uint32_t reg; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (speed > 100) + speed = 100; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL1_BASE_IDX, mmCG_FDO_CTRL1); + + duty100 = (cgs_read_register(hwmgr->device, reg) & + CG_FDO_CTRL1__FMAX_DUTY100_MASK) >> + CG_FDO_CTRL1__FMAX_DUTY100__SHIFT; + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (uint64_t)speed * duty100; + do_div(tmp64, 100); + duty = (uint32_t)tmp64; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL0_BASE_IDX, mmCG_FDO_CTRL0); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK) | + (duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT)); + + return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); +} + +/** +* Reset Fan Speed to default. +* @param hwmgr the address of the powerplay hardware manager. +* @exception Always succeeds. +*/ +int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr) +{ + int result; + + if (hwmgr->thermal_controller.fanInfo.bNoFan) + return 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) { + result = vega10_fan_ctrl_set_static_mode(hwmgr, + FDO_PWM_MODE_STATIC); + if (!result) + result = vega10_fan_ctrl_start_smc_fan_control(hwmgr); + } else + result = vega10_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set Fan Speed in RPM. +* @param hwmgr the address of the powerplay hardware manager. +* @param speed is the fan speed in RPM (min - max) to be set. +* @exception Fails if the speed does not lie between min and max. 
+*/ +int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t speed) +{ + uint32_t tach_period; + uint32_t crystal_clock_freq; + int result = 0; + uint32_t reg; + + if (hwmgr->thermal_controller.fanInfo.bNoFan || + (speed < hwmgr->thermal_controller.fanInfo.ulMinRPM) || + (speed > hwmgr->thermal_controller.fanInfo.ulMaxRPM)) + return -1; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) + result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + + if (!result) { + crystal_clock_freq = smu7_get_xclk(hwmgr); + tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_TACH_STATUS_BASE_IDX, mmCG_TACH_STATUS); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_TACH_STATUS__TACH_PERIOD_MASK) | + (tach_period << CG_TACH_STATUS__TACH_PERIOD__SHIFT)); + } + return vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC_RPM); +} + +/** +* Reads the current temperature from the Vega10 thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr) +{ + int temp; + uint32_t reg; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_TACH_STATUS_BASE_IDX, mmCG_MULT_THERMAL_STATUS); + + temp = cgs_read_register(hwmgr->device, reg); + + temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >> + CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT; + + /* Bit 9 means the reading is lower than the lowest usable value. */ + if (temp & 0x200) + temp = VEGA10_THERMAL_MAXIMUM_TEMP_READING; + else + temp = temp & 0x1ff; + + temp *= PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + + return temp; +} + +/** +* Set the requested temperature range for high and low alert signals +* +* @param hwmgr The address of the hardware manager. +* @param range Temperature range to be programmed for +* high and low alert signals +* @exception PP_Result_BadInput if the input data is not valid. +*/ +static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range) +{ + uint32_t low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP * + PP_TEMPERATURE_UNITS_PER_CENTIGRADES; + uint32_t val, reg; + + if (low < range->min) + low = range->min; + if (high > range->max) + high = range->max; + + if (low > high) + return -EINVAL; + + reg = soc15_get_register_offset(THM_HWID, 0, + mmTHM_THERMAL_INT_CTRL_BASE_IDX, mmTHM_THERMAL_INT_CTRL); + + val = cgs_read_register(hwmgr->device, reg); + val &= ~(THM_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK); + val |= (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) << + THM_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT; + val &= ~(THM_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); + val |= (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) << + THM_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT; + cgs_write_register(hwmgr->device, reg, val); + + reg = soc15_get_register_offset(THM_HWID, 0, + mmTHM_TCON_HTC_BASE_IDX, mmTHM_TCON_HTC); + + val = cgs_read_register(hwmgr->device, reg); + val &= ~(THM_TCON_HTC__HTC_TMP_LMT_MASK); + val |= (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES) << + THM_TCON_HTC__HTC_TMP_LMT__SHIFT; + cgs_write_register(hwmgr->device, reg, val); + + return 0; +} + +/** +* Programs thermal controller one-time setting registers +* +* @param hwmgr The address of the hardware manager. 
+*/ +static int vega10_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + uint32_t reg; + + if (hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution) { + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_TACH_CTRL_BASE_IDX, mmCG_TACH_CTRL); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_TACH_CTRL__EDGE_PER_REV_MASK) | + ((hwmgr->thermal_controller.fanInfo. + ucTachometerPulsesPerRevolution - 1) << + CG_TACH_CTRL__EDGE_PER_REV__SHIFT)); + } + + reg = soc15_get_register_offset(THM_HWID, 0, + mmCG_FDO_CTRL2_BASE_IDX, mmCG_FDO_CTRL2); + cgs_write_register(hwmgr->device, reg, + (cgs_read_register(hwmgr->device, reg) & + ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK) | + (0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT)); + + return 0; +} + +/** +* Enable thermal alerts on the Vega10 thermal controller. +* +* @param hwmgr The address of the hardware manager. +*/ +static int vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_FW_CTF].supported) { + if (data->smu_features[GNLD_FW_CTF].enabled) + printk("[Thermal_EnableAlert] FW CTF Already Enabled!\n"); + } + + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + true, + data->smu_features[GNLD_FW_CTF].smu_feature_bitmap), + "Attempt to Enable FW CTF feature Failed!", + return -1); + data->smu_features[GNLD_FW_CTF].enabled = true; + return 0; +} + +/** +* Disable thermal alerts on the Vega10 thermal controller. +* @param hwmgr The address of the hardware manager. +*/ +static int vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr) +{ + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + + if (data->smu_features[GNLD_FW_CTF].supported) { + if (!data->smu_features[GNLD_FW_CTF].enabled) + printk("[Thermal_DisableAlert] FW CTF Already disabled!\n"); + } + + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr, + false, + data->smu_features[GNLD_FW_CTF].smu_feature_bitmap), + "Attempt to disable FW CTF feature Failed!", + return -1); + data->smu_features[GNLD_FW_CTF].enabled = false; + return 0; +} + +/** +* Uninitialize the thermal controller. +* Currently just disables alerts. +* @param hwmgr The address of the hardware manager. +*/ +int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr) +{ + int result = vega10_thermal_disable_alert(hwmgr); + + if (!hwmgr->thermal_controller.fanInfo.bNoFan) + vega10_fan_ctrl_set_default_mode(hwmgr); + + return result; +} + +/** +* Set up the fan table to control the fan using the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + int ret; + struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend); + PPTable_t *table = &(data->smc_state_table.pp_table); + + if (!data->smu_features[GNLD_FAN_CONTROL].supported) + return 0; + + table->FanMaximumRpm = (uint16_t)hwmgr->thermal_controller. + advanceFanControlParameters.usMaxFanRPM; + table->FanThrottlingRpm = hwmgr->thermal_controller. + advanceFanControlParameters.usFanRPMMaxLimit; + table->FanAcousticLimitRpm = (uint16_t)(hwmgr->thermal_controller. 
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit); + table->FanTargetTemperature = hwmgr->thermal_controller. + advanceFanControlParameters.usTMax; + table->FanPwmMin = hwmgr->thermal_controller. + advanceFanControlParameters.usPWMMin * 255 / 100; + table->FanTargetGfxclk = (uint16_t)(hwmgr->thermal_controller. + advanceFanControlParameters.ulTargetGfxClk); + table->FanGainEdge = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainEdge; + table->FanGainHotspot = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainHotspot; + table->FanGainLiquid = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainLiquid; + table->FanGainVrVddc = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainVrVddc; + table->FanGainVrMvdd = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainVrMvdd; + table->FanGainPlx = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainPlx; + table->FanGainHbm = hwmgr->thermal_controller. + advanceFanControlParameters.usFanGainHbm; + table->FanZeroRpmEnable = hwmgr->thermal_controller. + advanceFanControlParameters.ucEnableZeroRPM; + table->FanStopTemp = hwmgr->thermal_controller. + advanceFanControlParameters.usZeroRPMStopTemperature; + table->FanStartTemp = hwmgr->thermal_controller. + advanceFanControlParameters.usZeroRPMStartTemperature; + + ret = vega10_copy_table_to_smc(hwmgr->smumgr, + (uint8_t *)(&(data->smc_state_table.pp_table)), PPTABLE); + if (ret) + pr_info("Failed to update Fan Control Table in PPTable!"); + + return ret; +} + +/** +* Start the fan control on the SMC. +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_vega10_thermal_start_smc_fan_control(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ +/* If the fantable setup has failed we could have disabled + * PHM_PlatformCaps_MicrocodeFanControl even after + * this function was included in the table. + * Make sure that we still think controlling the fan is OK. +*/ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_MicrocodeFanControl)) { + vega10_fan_ctrl_start_smc_fan_control(hwmgr); + vega10_fan_ctrl_set_static_mode(hwmgr, FDO_PWM_MODE_STATIC); + } + + return 0; +} + +/** +* Set temperature range for high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from set temperature range routine +*/ +int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + struct PP_TemperatureRange *range = (struct PP_TemperatureRange *)input; + + if (range == NULL) + return -EINVAL; + + return vega10_thermal_set_temperature_range(hwmgr, range); +} + +/** +* Programs one-time setting registers +* @param hwmgr the address of the powerplay hardware manager. 
+* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from initialize thermal controller routine +*/ +int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return vega10_thermal_initialize(hwmgr); +} + +/** +* Enable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from enable alert routine +*/ +int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return vega10_thermal_enable_alert(hwmgr); +} + +/** +* Disable high and low alerts +* @param hwmgr the address of the powerplay hardware manager. +* @param pInput the pointer to input data +* @param pOutput the pointer to output data +* @param pStorage the pointer to temporary storage +* @param Result the last failure code +* @return result from disable alert routine +*/ +static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result) +{ + return vega10_thermal_disable_alert(hwmgr); +} + +static struct phm_master_table_item +vega10_thermal_start_thermal_controller_master_list[] = { + {NULL, tf_vega10_thermal_initialize}, + {NULL, tf_vega10_thermal_set_temperature_range}, + {NULL, tf_vega10_thermal_enable_alert}, +/* We should restrict performance levels to low before we halt the SMC. + * On the other hand we are still in boot state when we do this + * so it would be pointless. + * If this assumption changes we have to revisit this table. + */ + {NULL, tf_vega10_thermal_setup_fan_table}, + {NULL, tf_vega10_thermal_start_smc_fan_control}, + {NULL, NULL} +}; + +static struct phm_master_table_header +vega10_thermal_start_thermal_controller_master = { + 0, + PHM_MasterTableFlag_None, + vega10_thermal_start_thermal_controller_master_list +}; + +static struct phm_master_table_item +vega10_thermal_set_temperature_range_master_list[] = { + {NULL, tf_vega10_thermal_disable_alert}, + {NULL, tf_vega10_thermal_set_temperature_range}, + {NULL, tf_vega10_thermal_enable_alert}, + {NULL, NULL} +}; + +struct phm_master_table_header +vega10_thermal_set_temperature_range_master = { + 0, + PHM_MasterTableFlag_None, + vega10_thermal_set_temperature_range_master_list +}; + +int vega10_thermal_ctrl_uninitialize_thermal_controller(struct pp_hwmgr *hwmgr) +{ + if (!hwmgr->thermal_controller.fanInfo.bNoFan) { + vega10_fan_ctrl_set_default_mode(hwmgr); + vega10_fan_ctrl_stop_smc_fan_control(hwmgr); + } + return 0; +} + +/** +* Initializes the thermal controller related functions +* in the Hardware Manager structure. +* @param hwmgr The address of the hardware manager. +* @exception Any error code from the low-level communication. 
+*/ +int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr) +{ + int result; + + result = phm_construct_table(hwmgr, + &vega10_thermal_set_temperature_range_master, + &(hwmgr->set_temperature_range)); + + if (!result) { + result = phm_construct_table(hwmgr, + &vega10_thermal_start_thermal_controller_master, + &(hwmgr->start_thermal_controller)); + if (result) + phm_destroy_table(hwmgr, + &(hwmgr->set_temperature_range)); + } + + if (!result) + hwmgr->fan_ctrl_is_in_default_mode = true; + return result; +} + diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h new file mode 100644 index 0000000..8036808 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.h @@ -0,0 +1,83 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef VEGA10_THERMAL_H +#define VEGA10_THERMAL_H + +#include "hwmgr.h" + +struct vega10_temperature { + uint16_t edge_temp; + uint16_t hot_spot_temp; + uint16_t hbm_temp; + uint16_t vr_soc_temp; + uint16_t vr_mem_temp; + uint16_t liquid1_temp; + uint16_t liquid2_temp; + uint16_t plx_temp; +}; + +#define VEGA10_THERMAL_HIGH_ALERT_MASK 0x1 +#define VEGA10_THERMAL_LOW_ALERT_MASK 0x2 + +#define VEGA10_THERMAL_MINIMUM_TEMP_READING -256 +#define VEGA10_THERMAL_MAXIMUM_TEMP_READING 255 + +#define VEGA10_THERMAL_MINIMUM_ALERT_TEMP 0 +#define VEGA10_THERMAL_MAXIMUM_ALERT_TEMP 255 + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + + +extern int tf_vega10_thermal_initialize(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result); +extern int tf_vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result); +extern int tf_vega10_thermal_enable_alert(struct pp_hwmgr *hwmgr, + void *input, void *output, void *storage, int result); + +extern int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr); +extern int vega10_thermal_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern int vega10_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr, + struct phm_fan_speed_info *fan_speed_info); +extern int vega10_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t *speed); +extern int vega10_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr); +extern int vega10_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, + uint32_t mode); +extern int vega10_fan_ctrl_set_fan_speed_percent(struct pp_hwmgr *hwmgr, + uint32_t speed); +extern int vega10_fan_ctrl_reset_fan_speed_to_default(struct pp_hwmgr *hwmgr); +extern int pp_vega10_thermal_initialize(struct pp_hwmgr *hwmgr); +extern int vega10_thermal_ctrl_uninitialize_thermal_controller( + struct pp_hwmgr *hwmgr); +extern int vega10_fan_ctrl_set_fan_speed_rpm(struct pp_hwmgr *hwmgr, + uint32_t speed); +extern int vega10_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, + uint32_t *speed); +extern int vega10_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr); +extern uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr); + +#endif + diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 7de9bea..320225d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -85,6 +85,7 @@ enum PP_FEATURE_MASK { PP_CLOCK_STRETCH_MASK = 0x400, PP_OD_FUZZY_FAN_CONTROL_MASK = 0x800, PP_SOCCLK_DPM_MASK = 0x1000, + PP_DCEFCLK_DPM_MASK = 0x2000, }; enum PHM_BackEnd_Magic { @@ -820,6 +821,8 @@ extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t ma extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr); extern int smu7_init_function_pointers(struct pp_hwmgr *hwmgr); +extern int vega10_hwmgr_init(struct pp_hwmgr *hwmgr); + extern int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t id, uint16_t *voltage); diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h new file mode 100644 index 0000000..227d999 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_soc15.h @@ -0,0 +1,48 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef PP_SOC15_H +#define PP_SOC15_H + +#include "vega10/soc15ip.h" + +inline static uint32_t soc15_get_register_offset( + uint32_t hw_id, + uint32_t inst, + uint32_t segment, + uint32_t offset) +{ + uint32_t reg = 0; + + if (hw_id == THM_HWID) + reg = THM_BASE.instance[inst].segment[segment] + offset; + else if (hw_id == NBIF_HWID) + reg = NBIF_BASE.instance[inst].segment[segment] + offset; + else if (hw_id == MP1_HWID) + reg = MP1_BASE.instance[inst].segment[segment] + offset; + else if (hw_id == DF_HWID) + reg = DF_BASE.instance[inst].segment[segment] + offset; + + return reg; +} + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h index 52f56f4..37f4121 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h @@ -38,6 +38,7 @@ extern const struct pp_smumgr_func iceland_smu_funcs; extern const struct pp_smumgr_func tonga_smu_funcs; extern const struct pp_smumgr_func fiji_smu_funcs; extern const struct pp_smumgr_func polaris10_smu_funcs; +extern const struct pp_smumgr_func vega10_smu_funcs; enum AVFS_BTC_STATUS { AVFS_BTC_BOOT = 0, @@ -177,6 +178,8 @@ extern int smu_allocate_memory(void *device, uint32_t size, void **kptr, void *handle); extern int smu_free_memory(void *device, void *handle); +extern int vega10_smum_init(struct pp_smumgr *smumgr); + extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile index 51ff083..68b01b5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/smumgr/Makefile @@ -4,7 +4,7 @@ SMU_MGR = smumgr.o cz_smumgr.o tonga_smumgr.o fiji_smumgr.o fiji_smc.o \ polaris10_smumgr.o iceland_smumgr.o polaris10_smc.o tonga_smc.o \ - smu7_smumgr.o iceland_smc.o + smu7_smumgr.o iceland_smc.o vega10_smumgr.o AMD_PP_SMUMGR = $(addprefix $(AMD_PP_PATH)/smumgr/,$(SMU_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index 454f445..c0d7576 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -86,6 +86,15 @@ int smum_early_init(struct pp_instance *handle) return -EINVAL; } break; + case AMDGPU_FAMILY_AI: + switch (smumgr->chip_id) { + case 
CHIP_VEGA10: + smumgr->smumgr_funcs = &vega10_smu_funcs; + break; + default: + return -EINVAL; + } + break; default: kfree(smumgr); return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c new file mode 100644 index 0000000..2685f02 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -0,0 +1,564 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "smumgr.h" +#include "vega10_inc.h" +#include "pp_soc15.h" +#include "vega10_smumgr.h" +#include "vega10_ppsmc.h" +#include "smu9_driver_if.h" + +#include "ppatomctrl.h" +#include "pp_debug.h" +#include "smu_ucode_xfer_vi.h" +#include "smu7_smumgr.h" + +#define AVFS_EN_MSB 1568 +#define AVFS_EN_LSB 1568 + +#define VOLTAGE_SCALE 4 + +/* Microcode file is stored in this buffer */ +#define BUFFER_SIZE 80000 +#define MAX_STRING_SIZE 15 +#define BUFFER_SIZETWO 131072 /* 128 *1024 */ + +/* MP Apertures */ +#define MP0_Public 0x03800000 +#define MP0_SRAM 0x03900000 +#define MP1_Public 0x03b00000 +#define MP1_SRAM 0x03c00004 + +#define smnMP1_FIRMWARE_FLAGS 0x3010028 +#define smnMP0_FW_INTF 0x3010104 +#define smnMP1_PUB_CTRL 0x3010b14 + +static bool vega10_is_smc_ram_running(struct pp_smumgr *smumgr) +{ + uint32_t mp1_fw_flags, reg; + + reg = soc15_get_register_offset(NBIF_HWID, 0, + mmPCIE_INDEX2_BASE_IDX, mmPCIE_INDEX2); + + cgs_write_register(smumgr->device, reg, + (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); + + reg = soc15_get_register_offset(NBIF_HWID, 0, + mmPCIE_DATA2_BASE_IDX, mmPCIE_DATA2); + + mp1_fw_flags = cgs_read_register(smumgr->device, reg); + + if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) + return true; + + return false; +} + +/** +* Check if SMC has responded to previous message. +* +* @param smumgr the address of the powerplay hardware manager. +* @return TRUE SMC has responded, FALSE otherwise. +*/ +static uint32_t vega10_wait_for_response(struct pp_smumgr *smumgr) +{ + uint32_t reg; + + if (!vega10_is_smc_ram_running(smumgr)) + return -1; + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + + smum_wait_for_register_unequal(smumgr, reg, + 0, MP1_C2PMSG_90__CONTENT_MASK); + + return cgs_read_register(smumgr->device, reg); +} + +/** +* Send a message to the SMC, and do not wait for its response. 
+* +* @param smumgr the address of the powerplay hardware manager. +* @param msg the message to send. +* @return Always return 0. +*/ +int vega10_send_msg_to_smc_without_waiting(struct pp_smumgr *smumgr, + uint16_t msg) +{ + uint32_t reg; + + if (!vega10_is_smc_ram_running(smumgr)) + return -1; + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_66_BASE_IDX, mmMP1_SMN_C2PMSG_66); + cgs_write_register(smumgr->device, reg, msg); + + return 0; +} + +/** +* Send a message to the SMC, and wait for its response. +* +* @param smumgr the address of the powerplay hardware manager. +* @param msg the message to send. +* @return The response that came from the SMC. +*/ +int vega10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) +{ + uint32_t reg; + + if (!vega10_is_smc_ram_running(smumgr)) + return -1; + + vega10_wait_for_response(smumgr); + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + cgs_write_register(smumgr->device, reg, 0); + + vega10_send_msg_to_smc_without_waiting(smumgr, msg); + + PP_ASSERT_WITH_CODE(vega10_wait_for_response(smumgr) == 1, + "Failed to send Message.", + return -1); + + return 0; +} + +/** + * Send a message to the SMC with parameter + * @param smumgr: the address of the powerplay hardware manager. + * @param msg: the message to send. + * @param parameter: the parameter to send + * @return The response that came from the SMC. + */ +int vega10_send_msg_to_smc_with_parameter(struct pp_smumgr *smumgr, + uint16_t msg, uint32_t parameter) +{ + uint32_t reg; + + if (!vega10_is_smc_ram_running(smumgr)) + return -1; + + vega10_wait_for_response(smumgr); + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_90_BASE_IDX, mmMP1_SMN_C2PMSG_90); + cgs_write_register(smumgr->device, reg, 0); + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + cgs_write_register(smumgr->device, reg, parameter); + + vega10_send_msg_to_smc_without_waiting(smumgr, msg); + + PP_ASSERT_WITH_CODE(vega10_wait_for_response(smumgr) == 1, + "Failed to send Message.", + return -1); + + return 0; +} + + +/** +* Send a message to the SMC with parameter, do not wait for response +* +* @param smumgr: the address of the powerplay hardware manager. +* @param msg: the message to send. +* @param parameter: the parameter to send +* @return The response that came from the SMC. +*/ +int vega10_send_msg_to_smc_with_parameter_without_waiting( + struct pp_smumgr *smumgr, uint16_t msg, uint32_t parameter) +{ + uint32_t reg; + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + cgs_write_register(smumgr->device, reg, parameter); + + return vega10_send_msg_to_smc_without_waiting(smumgr, msg); +} + +/** +* Retrieve an argument from SMC. +* +* @param smumgr the address of the powerplay hardware manager. +* @param arg pointer to store the argument from SMC. +* @return Always return 0. 
+*/ +int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg) +{ + uint32_t reg; + + reg = soc15_get_register_offset(MP1_HWID, 0, + mmMP1_SMN_C2PMSG_82_BASE_IDX, mmMP1_SMN_C2PMSG_82); + + *arg = cgs_read_register(smumgr->device, reg); + + return 0; +} + +/** +* Copy table from SMC into driver FB +* @param smumgr the address of the SMC manager +* @param table_id the driver's table ID to copy from +*/ +int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, + uint8_t *table, int16_t table_id) +{ + struct vega10_smumgr *priv = + (struct vega10_smumgr *)(smumgr->backend); + + PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, + "Invalid SMU Table ID!", return -1;); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, + "Invalid SMU Table version!", return -1;); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, + "Invalid SMU Table Length!", return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetDriverDramAddrHigh, + priv->smu_tables.entry[table_id].table_addr_high) == 0, + "[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetDriverDramAddrLow, + priv->smu_tables.entry[table_id].table_addr_low) == 0, + "[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!", + return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_TransferTableSmu2Dram, + priv->smu_tables.entry[table_id].table_id) == 0, + "[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!", + return -1;); + + memcpy(table, priv->smu_tables.entry[table_id].table, + priv->smu_tables.entry[table_id].size); + + return 0; +} + +/** +* Copy table from Driver FB into SMC +* @param smumgr the address of the SMC manager +* @param table_id the table to copy from +*/ +int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, + uint8_t *table, int16_t table_id) +{ + struct vega10_smumgr *priv = + (struct vega10_smumgr *)(smumgr->backend); + + PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, + "Invalid SMU Table ID!", return -1;); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, + "Invalid SMU Table version!", return -1;); + PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0, + "Invalid SMU Table Length!", return -1;); + + memcpy(priv->smu_tables.entry[table_id].table, table, + priv->smu_tables.entry[table_id].size); + + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetDriverDramAddrHigh, + priv->smu_tables.entry[table_id].table_addr_high) == 0, + "[CopyTableToSMC] Attempt to Set Dram Addr High Failed!", + return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_SetDriverDramAddrLow, + priv->smu_tables.entry[table_id].table_addr_low) == 0, + "[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!", + return -1;); + PP_ASSERT_WITH_CODE(vega10_send_msg_to_smc_with_parameter(smumgr, + PPSMC_MSG_TransferTableDram2Smu, + priv->smu_tables.entry[table_id].table_id) == 0, + "[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!", + return -1;); + + return 0; +} + +int vega10_perform_btc(struct pp_smumgr *smumgr) +{ + PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc_with_parameter( + smumgr, PPSMC_MSG_RunBtc, 0), + "Attempt to run DC BTC Failed!", + return -1); + return 0; +} + +int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table) +{ + PP_ASSERT_WITH_CODE(avfs_table, + "No access to SMC AVFS Table", + return -1); + + return 
vega10_copy_table_from_smc(smumgr, avfs_table, AVFSTABLE);
+}
+
+int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table)
+{
+ PP_ASSERT_WITH_CODE(avfs_table,
+ "No access to SMC AVFS Table",
+ return -1);
+
+ return vega10_copy_table_to_smc(smumgr, avfs_table, AVFSTABLE);
+}
+
+int vega10_enable_smc_features(struct pp_smumgr *smumgr,
+ bool enable, uint32_t feature_mask)
+{
+ int msg = enable ? PPSMC_MSG_EnableSmuFeatures :
+ PPSMC_MSG_DisableSmuFeatures;
+
+ return vega10_send_msg_to_smc_with_parameter(smumgr,
+ msg, feature_mask);
+}
+
+int vega10_get_smc_features(struct pp_smumgr *smumgr,
+ uint32_t *features_enabled)
+{
+ if (!vega10_send_msg_to_smc(smumgr,
+ PPSMC_MSG_GetEnabledSmuFeatures)) {
+ if (!vega10_read_arg_from_smc(smumgr, features_enabled))
+ return 0;
+ }
+
+ return -1;
+}
+
+int vega10_set_tools_address(struct pp_smumgr *smumgr)
+{
+ struct vega10_smumgr *priv =
+ (struct vega10_smumgr *)(smumgr->backend);
+
+ if (priv->smu_tables.entry[TOOLSTABLE].table_addr_high ||
+ priv->smu_tables.entry[TOOLSTABLE].table_addr_low) {
+ if (!vega10_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_SetToolsDramAddrHigh,
+ priv->smu_tables.entry[TOOLSTABLE].table_addr_high))
+ vega10_send_msg_to_smc_with_parameter(smumgr,
+ PPSMC_MSG_SetToolsDramAddrLow,
+ priv->smu_tables.entry[TOOLSTABLE].table_addr_low);
+ }
+ return 0;
+}
+
+static int vega10_verify_smc_interface(struct pp_smumgr *smumgr)
+{
+ uint32_t smc_driver_if_version;
+
+ PP_ASSERT_WITH_CODE(!vega10_send_msg_to_smc(smumgr,
+ PPSMC_MSG_GetDriverIfVersion),
+ "Attempt to get SMC IF Version Number Failed!",
+ return -1);
+ PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(smumgr,
+ &smc_driver_if_version),
+ "Attempt to read SMC IF Version Number Failed!",
+ return -1);
+
+ if (smc_driver_if_version != SMU9_DRIVER_IF_VERSION)
+ return -1;
+
+ return 0;
+}
+
+/**
+* Initialize the SMU manager backend: read the SMU firmware info and
+* allocate the driver/SMU shared tables (pptable, watermarks, AVFS and
+* tools tables).
+*
+* @param smumgr the address of the powerplay hardware manager.
+* @return 0 on success, a negative error code otherwise.
+*/ +static int vega10_smu_init(struct pp_smumgr *smumgr) +{ + struct vega10_smumgr *priv; + uint64_t mc_addr; + void *kaddr = NULL; + unsigned long handle, tools_size; + int ret; + struct cgs_firmware_info info = {0}; + + ret = cgs_get_firmware_info(smumgr->device, + smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), + &info); + if (ret || !info.kptr) + return -EINVAL; + + priv = kzalloc(sizeof(struct vega10_smumgr), GFP_KERNEL); + + if (!priv) + return -ENOMEM; + + smumgr->backend = priv; + + /* allocate space for pptable */ + smu_allocate_memory(smumgr->device, + sizeof(PPTable_t), + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &kaddr, + &handle); + + PP_ASSERT_WITH_CODE(kaddr, + "[vega10_smu_init] Out of memory for pptable.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)handle); + return -1); + + priv->smu_tables.entry[PPTABLE].version = 0x01; + priv->smu_tables.entry[PPTABLE].size = sizeof(PPTable_t); + priv->smu_tables.entry[PPTABLE].table_id = TABLE_PPTABLE; + priv->smu_tables.entry[PPTABLE].table_addr_high = + smu_upper_32_bits(mc_addr); + priv->smu_tables.entry[PPTABLE].table_addr_low = + smu_lower_32_bits(mc_addr); + priv->smu_tables.entry[PPTABLE].table = kaddr; + priv->smu_tables.entry[PPTABLE].handle = handle; + + /* allocate space for watermarks table */ + smu_allocate_memory(smumgr->device, + sizeof(Watermarks_t), + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &kaddr, + &handle); + + PP_ASSERT_WITH_CODE(kaddr, + "[vega10_smu_init] Out of memory for wmtable.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)handle); + return -1); + + priv->smu_tables.entry[WMTABLE].version = 0x01; + priv->smu_tables.entry[WMTABLE].size = sizeof(Watermarks_t); + priv->smu_tables.entry[WMTABLE].table_id = TABLE_WATERMARKS; + priv->smu_tables.entry[WMTABLE].table_addr_high = + smu_upper_32_bits(mc_addr); + priv->smu_tables.entry[WMTABLE].table_addr_low = + smu_lower_32_bits(mc_addr); + priv->smu_tables.entry[WMTABLE].table = kaddr; + priv->smu_tables.entry[WMTABLE].handle = handle; + + /* allocate space for AVFS table */ + smu_allocate_memory(smumgr->device, + sizeof(AvfsTable_t), + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &kaddr, + &handle); + + PP_ASSERT_WITH_CODE(kaddr, + "[vega10_smu_init] Out of memory for avfs table.", + kfree(smumgr->backend); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)handle); + return -1); + + priv->smu_tables.entry[AVFSTABLE].version = 0x01; + priv->smu_tables.entry[AVFSTABLE].size = sizeof(AvfsTable_t); + priv->smu_tables.entry[AVFSTABLE].table_id = TABLE_AVFS; + priv->smu_tables.entry[AVFSTABLE].table_addr_high = + smu_upper_32_bits(mc_addr); + priv->smu_tables.entry[AVFSTABLE].table_addr_low = + smu_lower_32_bits(mc_addr); + priv->smu_tables.entry[AVFSTABLE].table = kaddr; + priv->smu_tables.entry[AVFSTABLE].handle = handle; + + tools_size = 0; + if (tools_size) { + smu_allocate_memory(smumgr->device, + tools_size, + CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB, + PAGE_SIZE, + &mc_addr, + &kaddr, + &handle); + + if (kaddr) { + priv->smu_tables.entry[TOOLSTABLE].version = 0x01; + priv->smu_tables.entry[TOOLSTABLE].size = tools_size; + priv->smu_tables.entry[TOOLSTABLE].table_id = 
TABLE_PMSTATUSLOG; + priv->smu_tables.entry[TOOLSTABLE].table_addr_high = + smu_upper_32_bits(mc_addr); + priv->smu_tables.entry[TOOLSTABLE].table_addr_low = + smu_lower_32_bits(mc_addr); + priv->smu_tables.entry[TOOLSTABLE].table = kaddr; + priv->smu_tables.entry[TOOLSTABLE].handle = handle; + } + } + + return 0; +} + +static int vega10_smu_fini(struct pp_smumgr *smumgr) +{ + struct vega10_smumgr *priv = + (struct vega10_smumgr *)(smumgr->backend); + + if (priv) { + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[PPTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[WMTABLE].handle); + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[AVFSTABLE].handle); + if (priv->smu_tables.entry[TOOLSTABLE].table) + cgs_free_gpu_mem(smumgr->device, + (cgs_handle_t)priv->smu_tables.entry[TOOLSTABLE].handle); + kfree(smumgr->backend); + smumgr->backend = NULL; + } + return 0; +} + +static int vega10_start_smu(struct pp_smumgr *smumgr) +{ + PP_ASSERT_WITH_CODE(!vega10_verify_smc_interface(smumgr), + "Failed to verify SMC interface!", + return -1); + return 0; +} + +const struct pp_smumgr_func vega10_smu_funcs = { + .smu_init = &vega10_smu_init, + .smu_fini = &vega10_smu_fini, + .start_smu = &vega10_start_smu, + .request_smu_load_specific_fw = NULL, + .send_msg_to_smc = &vega10_send_msg_to_smc, + .send_msg_to_smc_with_parameter = &vega10_send_msg_to_smc_with_parameter, + .download_pptable_settings = NULL, + .upload_pptable_settings = NULL, +}; diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h new file mode 100644 index 0000000..ad05021 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.h @@ -0,0 +1,70 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _VEGA10_SMUMANAGER_H_ +#define _VEGA10_SMUMANAGER_H_ + +#include "vega10_hwmgr.h" + +enum smu_table_id { + PPTABLE = 0, + WMTABLE, + AVFSTABLE, + TOOLSTABLE, + MAX_SMU_TABLE, +}; + +struct smu_table_entry { + uint32_t version; + uint32_t size; + uint32_t table_id; + uint32_t table_addr_high; + uint32_t table_addr_low; + uint8_t *table; + unsigned long handle; +}; + +struct smu_table_array { + struct smu_table_entry entry[MAX_SMU_TABLE]; +}; + +struct vega10_smumgr { + struct smu_table_array smu_tables; +}; + +int vega10_read_arg_from_smc(struct pp_smumgr *smumgr, uint32_t *arg); +int vega10_copy_table_from_smc(struct pp_smumgr *smumgr, + uint8_t *table, int16_t table_id); +int vega10_copy_table_to_smc(struct pp_smumgr *smumgr, + uint8_t *table, int16_t table_id); +int vega10_enable_smc_features(struct pp_smumgr *smumgr, + bool enable, uint32_t feature_mask); +int vega10_get_smc_features(struct pp_smumgr *smumgr, + uint32_t *features_enabled); +int vega10_save_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); +int vega10_restore_vft_table(struct pp_smumgr *smumgr, uint8_t *avfs_table); +int vega10_perform_btc(struct pp_smumgr *smumgr); + +int vega10_set_tools_address(struct pp_smumgr *smumgr); + +#endif + -- 2.5.5
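For reference, the fan speed handling in vega10_fan_ctrl_set_fan_speed_rpm() above comes down to the
tach-period conversion sketched below. This is only an illustrative, standalone sketch and not part of
the patch: the helper names rpm_to_tach_period()/tach_period_to_rpm() are hypothetical, the 27 MHz
reference clock is an assumed example value, and it assumes the clock returned by smu7_get_xclk() is
expressed in 10 kHz units with 8 tach edges counted per fan revolution (the hard-coded 8 in the formula).

#include <stdint.h>
#include <stdio.h>

/* Mirrors the driver expression:
 *   tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed)
 * crystal_clock_freq is assumed to be in 10 kHz units, so multiplying
 * by 10000 gives the reference clock in Hz; dividing by the number of
 * tach edges per second (8 * rpm / 60) yields clock counts per edge.
 */
static uint32_t rpm_to_tach_period(uint32_t crystal_clock_freq, uint32_t rpm)
{
	return 60 * crystal_clock_freq * 10000 / (8 * rpm);
}

/* The same relation inverted, i.e. what a driver would do when reading
 * the tach period back from CG_TACH_STATUS to report a speed in RPM. */
static uint32_t tach_period_to_rpm(uint32_t crystal_clock_freq,
		uint32_t tach_period)
{
	return 60 * crystal_clock_freq * 10000 / (8 * tach_period);
}

int main(void)
{
	uint32_t xclk = 2700;	/* assumed 27 MHz reference clock, in 10 kHz units */
	uint32_t rpm = 3000;
	uint32_t period = rpm_to_tach_period(xclk, rpm);

	/* For these values the arithmetic stays within 32 bits and the
	 * conversion round-trips back to 3000 RPM (up to truncation). */
	printf("%u RPM -> tach period %u -> %u RPM\n",
			rpm, period, tach_period_to_rpm(xclk, period));
	return 0;
}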