On Mon, Dec 9, 2019 at 12:16 PM Tao, Yintian <Yintian.Tao@xxxxxxx> wrote: > > Hi Alex > > Many thanks for your review. Please see inline comments. I will submit the v3 patch according to your comments. Thanks again. > > Best Regards > Yintian Tao > > -----Original Message----- > From: Alex Deucher <alexdeucher@xxxxxxxxx> > Sent: December 10, 2019 0:52 > To: Tao, Yintian <Yintian.Tao@xxxxxxx> > Cc: Deucher, Alexander <Alexander.Deucher@xxxxxxx>; Feng, Kenneth <Kenneth.Feng@xxxxxxx>; amd-gfx list <amd-gfx@xxxxxxxxxxxxxxxxxxxxx> > Subject: Re: [PATCH] drm/amd/powerplay: enable pp one vf mode for vega10 > > On Mon, Dec 9, 2019 at 5:39 AM Yintian Tao <yttao@xxxxxxx> wrote: > > > > Originally, due to restrictions from PSP and SMU, the VF had > > to send messages to the hypervisor driver to handle powerplay > > changes, which was complicated and redundant. Currently, SMU > > and PSP allow the VF to handle powerplay > > changes directly by itself. Therefore, the old handshake code > > between VF and PF for powerplay handling will be removed, and the VF > > will use the new registers below to handshake with SMU. > > mmMP1_SMN_C2PMSG_101: register to handle SMU message > > mmMP1_SMN_C2PMSG_102: register to handle SMU parameter > > mmMP1_SMN_C2PMSG_103: register to handle SMU response > > > > v2: remove module parameter pp_one_vf > > > > Signed-off-by: Yintian Tao <yttao@xxxxxxx> > > --- > > drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13 +- > > drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 4 - > > drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 121 +++++++------- > > drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 51 ------ > > drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 14 +- > > drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 78 --------- > > drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 4 - > > drivers/gpu/drm/amd/amdgpu/soc15.c | 8 +- > > drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 4 +- > > drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 24 +-- > > .../drm/amd/powerplay/hwmgr/hardwaremanager.c | 15 +- > > drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 7 + > > drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c | 30 ++-- > > .../drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 157 ++++++++++++------ > > drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 1 + > > .../drm/amd/powerplay/smumgr/smu9_smumgr.c | 55 ++++-- > > .../drm/amd/powerplay/smumgr/vega10_smumgr.c | 6 + > > 17 files changed, 280 insertions(+), 312 deletions(-) > > > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c > > index b9ca7e728d3e..30eb42593a20 100644 > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c > > @@ -1880,6 +1880,9 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) > > } > > } > > > > + if (amdgpu_sriov_vf(adev)) > > + amdgpu_virt_init_data_exchange(adev); > > + > > r = amdgpu_ib_pool_init(adev); > > if (r) { > > dev_err(adev->dev, "IB initialization failed (%d).\n", r); > > @@ -1922,8 +1925,6 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) > > > > init_failed: > > if (amdgpu_sriov_vf(adev)) { > > - if (!r) > > - amdgpu_virt_init_data_exchange(adev); > > amdgpu_virt_release_full_gpu(adev, true); > > } > > > > @@ -2819,7 +2820,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, > > mutex_init(&adev->virt.vf_errors.lock); > > hash_init(adev->mn_hash); > > mutex_init(&adev->lock_reset); > > - mutex_init(&adev->virt.dpm_mutex); > > mutex_init(&adev->psp.mutex); > > > > r = amdgpu_device_check_arguments(adev); > > @@ -3040,9 +3040,6 @@ int
amdgpu_device_init(struct amdgpu_device *adev, > > > > amdgpu_fbdev_init(adev); > > > > - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) > > - amdgpu_pm_virt_sysfs_init(adev); > > - > > r = amdgpu_pm_sysfs_init(adev); > > if (r) { > > adev->pm_sysfs_en = false; > > @@ -3187,8 +3184,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) > > iounmap(adev->rmmio); > > adev->rmmio = NULL; > > amdgpu_device_doorbell_fini(adev); > > - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev)) > > - amdgpu_pm_virt_sysfs_fini(adev); > > > > amdgpu_debugfs_regs_cleanup(adev); > > device_remove_file(adev->dev, &dev_attr_pcie_replay_count); > > @@ -3669,6 +3664,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, > > if (r) > > goto error; > > > > + amdgpu_virt_init_data_exchange(adev); > > /* we need recover gart prior to run SMC/CP/SDMA resume */ > > amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]); > > > > @@ -3686,7 +3682,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, > > amdgpu_amdkfd_post_reset(adev); > > > > error: > > - amdgpu_virt_init_data_exchange(adev); > > amdgpu_virt_release_full_gpu(adev, true); > > if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { > > amdgpu_inc_vram_lost(adev); > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c > > index 5ec1415d1755..3a0ea9096498 100644 > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c > > @@ -703,10 +703,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file > > if (adev->pm.dpm_enabled) { > > dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10; > > dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10; > > - } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && > > - adev->virt.ops->get_pp_clk) { > > - dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10; > > - dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10; > > } else { > > dev_info.max_engine_clock = adev->clock.default_sclk * 10; > > dev_info.max_memory_clock = adev->clock.default_mclk * 10; > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c > > index 0c7324bc31a7..b8a42ebb2f5a 100644 > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c > > @@ -160,7 +160,8 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, > > enum amd_pm_state_type pm; > > > > if (is_support_sw_smu(adev)) { > > - if (adev->smu.ppt_funcs->get_current_power_state) > > + if (adev->smu.ppt_funcs && > > + adev->smu.ppt_funcs->get_current_power_state) > > This looks like a bug fix which should be broken out separately. > [yttao]: Thanks a lot. I will separate it. 
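For reference, the standalone fix being split out is just the extra guard on the ppt_funcs table itself; a minimal sketch of the guarded call, abbreviated from the amdgpu_get_dpm_state() hunk above (the rationale that ppt_funcs can still be NULL on a VF before SMU setup completes is my reading, not stated in the patch):

	/* Check the ppt_funcs table before dereferencing one of its
	 * members; presumably it can still be NULL here if the sysfs
	 * handler runs before the SMU software state is initialized. */
	if (is_support_sw_smu(adev)) {
		if (adev->smu.ppt_funcs &&
		    adev->smu.ppt_funcs->get_current_power_state)
			pm = smu_get_current_power_state(&adev->smu);
		else
			pm = adev->pm.dpm.user_state;
	}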
> > > pm = smu_get_current_power_state(&adev->smu); > > else > > pm = adev->pm.dpm.user_state; > > @@ -283,7 +284,7 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, > > struct amdgpu_device *adev = ddev->dev_private; > > enum amd_dpm_forced_level level = 0xff; > > > > - if (amdgpu_sriov_vf(adev)) > > + if (amdgpu_sriov_vf(adev) && !amdgim_is_pp_one_vf(adev)) > > return 0; > > > > if ((adev->flags & AMD_IS_PX) && > > @@ -320,6 +321,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, > > enum amd_dpm_forced_level current_level = 0xff; > > int ret = 0; > > > > + if (amdgpu_sriov_vf(adev) && !amdgim_is_pp_one_vf(adev)) > > + return -EINVAL; > > + > > /* Can't force performance level when the card is off */ > > if ((adev->flags & AMD_IS_PX) && > > (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) > > @@ -348,19 +352,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, > > goto fail; > > } > > > > - /* handle sriov case here */ > > - if (amdgpu_sriov_vf(adev)) { > > - if (amdgim_is_hwperf(adev) && > > - adev->virt.ops->force_dpm_level) { > > - mutex_lock(&adev->pm.mutex); > > - adev->virt.ops->force_dpm_level(adev, level); > > - mutex_unlock(&adev->pm.mutex); > > - return count; > > - } else { > > - return -EINVAL; > > - } > > - } > > - > > if (is_support_sw_smu(adev)) > > current_level = smu_get_performance_level(&adev->smu); > > else if (adev->powerplay.pp_funcs->get_performance_level) > > @@ -654,6 +645,9 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, > > const char delimiter[3] = {' ', '\n', '\0'}; > > uint32_t type; > > > > + if (amdgpu_sriov_vf(adev) && !amdgim_is_pp_one_vf(adev)) > > + return 0; > > + > > if (count > 127) > > return -EINVAL; > > > > @@ -841,9 +835,10 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, > > struct drm_device *ddev = dev_get_drvdata(dev); > > struct amdgpu_device *adev = ddev->dev_private; > > > > - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && > > - adev->virt.ops->get_pp_clk) > > - return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); > > + if (amdgpu_sriov_vf(adev)) { > > + if (!amdgim_is_pp_one_vf(adev)) > > + return -EINVAL; > > + } > > > > if (is_support_sw_smu(adev)) > > return smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); > > @@ -878,7 +873,6 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) > > while (tmp[0]) { > > sub_str = strsep(&tmp, delimiter); > > if (strlen(sub_str)) { > > - ret = kstrtol(sub_str, 0, &level); > > if (ret) > > return -EINVAL; > > *mask |= 1 << level; > > @@ -899,8 +893,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, > > int ret; > > uint32_t mask = 0; > > > > - if (amdgpu_sriov_vf(adev)) > > - return 0; > > + if (amdgpu_sriov_vf(adev)) { > > + if (!amdgim_is_pp_one_vf(adev)) > > + return -EINVAL; > > + } > > > > ret = amdgpu_read_mask(buf, count, &mask); > > if (ret) > > @@ -924,9 +920,10 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, > > struct drm_device *ddev = dev_get_drvdata(dev); > > struct amdgpu_device *adev = ddev->dev_private; > > > > - if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) && > > - adev->virt.ops->get_pp_clk) > > - return adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); > > + if (amdgpu_sriov_vf(adev)) { > > + if (!amdgim_is_pp_one_vf(adev)) > > + return -EINVAL; > > + } > > > > if (is_support_sw_smu(adev)) > > return smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); > > @@ -946,8 +943,11 @@ static ssize_t 
amdgpu_set_pp_dpm_mclk(struct device *dev, > > int ret; > > uint32_t mask = 0; > > > > - if (amdgpu_sriov_vf(adev)) > > - return 0; > > + if (amdgpu_sriov_vf(adev)) { > > + if (!amdgim_is_pp_one_vf(adev)) > > + return -EINVAL; > > + > > + } > > > > ret = amdgpu_read_mask(buf, count, &mask); > > if (ret) > > @@ -971,6 +971,11 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, > > struct drm_device *ddev = dev_get_drvdata(dev); > > struct amdgpu_device *adev = ddev->dev_private; > > > > + if (amdgpu_sriov_vf(adev)) { > > + if (!amdgim_is_pp_one_vf(adev)) > > + return -EINVAL; > > + } > > + > > if (is_support_sw_smu(adev)) > > return smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); > > else if (adev->powerplay.pp_funcs->print_clock_levels) > > @@ -989,6 +994,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, > > int ret; > > uint32_t mask = 0; > > > > + if (amdgpu_sriov_vf(adev)) > > + return 0; > > + > > ret = amdgpu_read_mask(buf, count, &mask); > > if (ret) > > return ret; > > @@ -1029,6 +1037,11 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, > > int ret; > > uint32_t mask = 0; > > > > + if (amdgpu_sriov_vf(adev)) { > > + if (!amdgim_is_pp_one_vf(adev)) > > + return -EINVAL; > > + } > > + > > ret = amdgpu_read_mask(buf, count, &mask); > > if (ret) > > return ret; > > @@ -1051,6 +1064,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, > > struct drm_device *ddev = dev_get_drvdata(dev); > > struct amdgpu_device *adev = ddev->dev_private; > > > > + if (amdgpu_sriov_vf(adev)) > > + return 0; > > + > > if (is_support_sw_smu(adev)) > > return smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); > > else if (adev->powerplay.pp_funcs->print_clock_levels) > > @@ -1069,6 +1085,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, > > int ret; > > uint32_t mask = 0; > > > > + if (amdgpu_sriov_vf(adev)) > > + return 0; > > + > > ret = amdgpu_read_mask(buf, count, &mask); > > if (ret) > > return ret; > > @@ -1091,6 +1110,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, > > struct drm_device *ddev = dev_get_drvdata(dev); > > struct amdgpu_device *adev = ddev->dev_private; > > > > + if (amdgpu_sriov_vf(adev)) > > + return 0; > > + > > if (is_support_sw_smu(adev)) > > return smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); > > else if (adev->powerplay.pp_funcs->print_clock_levels) > > @@ -1109,6 +1131,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, > > int ret; > > uint32_t mask = 0; > > > > + if (amdgpu_sriov_vf(adev)) > > + return 0; > > + > > ret = amdgpu_read_mask(buf, count, &mask); > > if (ret) > > return ret; > > @@ -1150,6 +1175,9 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, > > int ret; > > long int value; > > > > + if (amdgpu_sriov_vf(adev) && !amdgim_is_pp_one_vf(adev)) > > + return 0; > > + > > ret = kstrtol(buf, 0, &value); > > > > if (ret) { > > @@ -1201,6 +1229,9 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, > > int ret; > > long int value; > > > > + if (amdgpu_sriov_vf(adev) && !amdgim_is_pp_one_vf(adev)) > > + return 0; > > + > > ret = kstrtol(buf, 0, &value); > > > > if (ret) { > > @@ -2730,44 +2761,6 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) > > } > > } > > > > -int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev) > > -{ > > - int ret = 0; > > - > > - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) > > - return ret; > > - > > - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); > > - if (ret) { > > - 
DRM_ERROR("failed to create device file pp_dpm_sclk\n"); > > - return ret; > > - } > > - > > - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); > > - if (ret) { > > - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); > > - return ret; > > - } > > - > > - ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); > > - if (ret) { > > - DRM_ERROR("failed to create device file for dpm state\n"); > > - return ret; > > - } > > - > > - return ret; > > -} > > - > > -void amdgpu_pm_virt_sysfs_fini(struct amdgpu_device *adev) > > -{ > > - if (!(amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev))) > > - return; > > - > > - device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); > > - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); > > - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); > > -} > > - > > Are you sure you want to remove this functionality for the other > cases? Won't this regress multi-VF pm? Or was this functionality > always single VF, but just handled in the host rather than the guest? > [yttao]: yes, under the multi-VF condition a VF can't send dpm requests, because they would influence the other running VFs. > The pp one vf mode will only work with a single VF enabled. The legacy virt sysfs interface will not work with the > latest host driver, and the host driver will not support the legacy mode anymore. Ok. As long as we don't care about backwards compat with old GIM, this seems fine. Alex > > > > int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) > > { > > int r; > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c > > index e32ae906d797..103033f96f13 100644 > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c > > @@ -379,54 +379,3 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) > > } > > } > > } > > - > > -static uint32_t parse_clk(char *buf, bool min) > > -{ > > - char *ptr = buf; > > - uint32_t clk = 0; > > - > > - do { > > - ptr = strchr(ptr, ':'); > > - if (!ptr) > > - break; > > - ptr+=2; > > - if (kstrtou32(ptr, 10, &clk)) > > - return 0; > > - } while (!min); > > - > > - return clk * 100; > > -} > > - > > -uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest) > > -{ > > - char *buf = NULL; > > - uint32_t clk = 0; > > - > > - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); > > - if (!buf) > > - return -ENOMEM; > > - > > - adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf); > > - clk = parse_clk(buf, lowest); > > - > > - kfree(buf); > > - > > - return clk; > > -} > > - > > -uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest) > > -{ > > - char *buf = NULL; > > - uint32_t clk = 0; > > - > > - buf = kzalloc(PAGE_SIZE, GFP_KERNEL); > > - if (!buf) > > - return -ENOMEM; > > - > > - adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf); > > - clk = parse_clk(buf, lowest); > > - > > - kfree(buf); > > - > > - return clk; > > -} > > Same comment here as above regarding multi-VF pm.
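For context, the guest-side gating that replaces the old hwperf path is a single feature-flag test against the gim_feature word the host populates; roughly, per the virt.h hunk that follows:

	/* New GIM feature bit; per the discussion above, the host is
	 * expected to set it only when a single VF is enabled, so
	 * multi-VF guests keep pm control disabled. */
	#define AMDGIM_FEATURE_PP_ONE_VF	(1 << 4)

	#define amdgim_is_pp_one_vf(adev) \
		((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)

	/* Typical use in the amdgpu_pm.c sysfs handlers: */
	if (amdgpu_sriov_vf(adev) && !amdgim_is_pp_one_vf(adev))
		return -EINVAL;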
> > > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h > > index b0b2bdc750df..63db2a6f008d 100644 > > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h > > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h > > @@ -57,8 +57,6 @@ struct amdgpu_virt_ops { > > int (*reset_gpu)(struct amdgpu_device *adev); > > int (*wait_reset)(struct amdgpu_device *adev); > > void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3); > > - int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf); > > - int (*force_dpm_level)(struct amdgpu_device *adev, u32 level); > > }; > > > > /* > > @@ -85,8 +83,8 @@ enum AMDGIM_FEATURE_FLAG { > > AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2, > > /* VRAM LOST by GIM */ > > AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4, > > - /* HW PERF SIM in GIM */ > > - AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3), > > + /* PP ONE VF MODE in GIM */ > > + AMDGIM_FEATURE_PP_ONE_VF = (1 << 4), > > }; > > > > struct amd_sriov_msg_pf2vf_info_header { > > @@ -257,8 +255,6 @@ struct amdgpu_virt { > > struct amdgpu_vf_error_buffer vf_errors; > > struct amdgpu_virt_fw_reserve fw_reserve; > > uint32_t gim_feature; > > - /* protect DPM events to GIM */ > > - struct mutex dpm_mutex; > > uint32_t reg_access_mode; > > }; > > > > @@ -286,8 +282,8 @@ static inline bool is_virtual_machine(void) > > #endif > > } > > > > -#define amdgim_is_hwperf(adev) \ > > - ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION) > > +#define amdgim_is_pp_one_vf(adev) \ > > + ((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF) > > > > bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); > > void amdgpu_virt_init_setting(struct amdgpu_device *adev); > > @@ -306,6 +302,4 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size, > > unsigned int key, > > unsigned int chksum); > > void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev); > > -uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest); > > -uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest); > > #endif > > diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c > > index cc5bf595f9b1..43305afa3d6f 100644 > > --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c > > +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c > > @@ -158,82 +158,6 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev, > > xgpu_ai_mailbox_set_valid(adev, false); > > } > > > > -static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf) > > -{ > > - int r = 0; > > - u32 req, val, size; > > - > > - if (!amdgim_is_hwperf(adev) || buf == NULL) > > - return -EBADRQC; > > - > > - switch(type) { > > - case PP_SCLK: > > - req = IDH_IRQ_GET_PP_SCLK; > > - break; > > - case PP_MCLK: > > - req = IDH_IRQ_GET_PP_MCLK; > > - break; > > - default: > > - return -EBADRQC; > > - } > > - > > - mutex_lock(&adev->virt.dpm_mutex); > > - > > - xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0); > > - > > - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); > > - if (!r && adev->fw_vram_usage.va != NULL) { > > - val = RREG32_NO_KIQ( > > - SOC15_REG_OFFSET(NBIO, 0, > > - mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1)); > > - size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) + > > - val), PAGE_SIZE); > > - > > - if (size < PAGE_SIZE) > > - strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val)); > > - else > > - size = 0; > > - > > - r = size; > > - goto out; > > - } > > - > > - r = xgpu_ai_poll_msg(adev, IDH_FAIL); > > - if(r) > > - 
pr_info("%s DPM request failed", > > - (type == PP_SCLK)? "SCLK" : "MCLK"); > > - > > -out: > > - mutex_unlock(&adev->virt.dpm_mutex); > > - return r; > > -} > > - > > -static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level) > > -{ > > - int r = 0; > > - u32 req = IDH_IRQ_FORCE_DPM_LEVEL; > > - > > - if (!amdgim_is_hwperf(adev)) > > - return -EBADRQC; > > - > > - mutex_lock(&adev->virt.dpm_mutex); > > - xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0); > > - > > - r = xgpu_ai_poll_msg(adev, IDH_SUCCESS); > > - if (!r) > > - goto out; > > - > > - r = xgpu_ai_poll_msg(adev, IDH_FAIL); > > - if (!r) > > - pr_info("DPM request failed"); > > - else > > - pr_info("Mailbox is broken"); > > - > > -out: > > - mutex_unlock(&adev->virt.dpm_mutex); > > - return r; > > -} > > - > > static int xgpu_ai_send_access_requests(struct amdgpu_device *adev, > > enum idh_request req) > > { > > @@ -455,6 +379,4 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = { > > .reset_gpu = xgpu_ai_request_reset, > > .wait_reset = NULL, > > .trans_msg = xgpu_ai_mailbox_trans_msg, > > - .get_pp_clk = xgpu_ai_get_pp_clk, > > - .force_dpm_level = xgpu_ai_force_dpm_level, > > }; > > diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h > > index 077e91a33d62..37dbe0f2142f 100644 > > --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h > > +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h > > @@ -35,10 +35,6 @@ enum idh_request { > > IDH_REL_GPU_FINI_ACCESS, > > IDH_REQ_GPU_RESET_ACCESS, > > > > - IDH_IRQ_FORCE_DPM_LEVEL = 10, > > - IDH_IRQ_GET_PP_SCLK, > > - IDH_IRQ_GET_PP_MCLK, > > - > > IDH_LOG_VF_ERROR = 200, > > }; > > > > diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c > > index 48649f55f3ba..59f5f2a39fe4 100644 > > --- a/drivers/gpu/drm/amd/amdgpu/soc15.c > > +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c > > @@ -777,11 +777,11 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) > > } > > amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); > > amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); > > - if (!amdgpu_sriov_vf(adev)) { > > - if (is_support_sw_smu(adev)) > > + if (is_support_sw_smu(adev)) { > > + if (!amdgpu_sriov_vf(adev)) > > amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); > > - else > > - amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); > > + } else { > > + amdgpu_device_ip_block_add(adev, &pp_smu_ip_block); > > } > > if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) > > amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); > > diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c > > index 7932eb163a00..5087d6bdba60 100644 > > --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c > > +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c > > @@ -48,7 +48,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev) > > > > hwmgr->adev = adev; > > hwmgr->not_vf = !amdgpu_sriov_vf(adev); > > - hwmgr->pm_en = (amdgpu_dpm && hwmgr->not_vf) ? 
true : false; > > hwmgr->device = amdgpu_cgs_create_device(adev); > > mutex_init(&hwmgr->smu_lock); > > hwmgr->chip_family = adev->family; > > @@ -276,6 +275,9 @@ static int pp_dpm_load_fw(void *handle) > > { > > struct pp_hwmgr *hwmgr = handle; > > > > + if (!hwmgr->not_vf) > > + return 0; > > + > > if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu) > > return -EINVAL; > > > > diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c > > index 36001a4f65d7..714b8381d02c 100644 > > --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c > > +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c > > @@ -1653,11 +1653,13 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, > > if (!smu->pm_enabled) > > return -EINVAL; > > > > - if (!skip_display_settings) { > > - ret = smu_display_config_changed(smu); > > - if (ret) { > > - pr_err("Failed to change display config!"); > > - return ret; > > + if (!amdgpu_sriov_vf(smu->adev)) { > > + if (!skip_display_settings) { > > + ret = smu_display_config_changed(smu); > > + if (ret) { > > + pr_err("Failed to change display config!"); > > + return ret; > > + } > > } > > } > > > > @@ -1667,11 +1669,13 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, > > return ret; > > } > > > > - if (!skip_display_settings) { > > - ret = smu_notify_smc_dispaly_config(smu); > > - if (ret) { > > - pr_err("Failed to notify smc display config!"); > > - return ret; > > + if (amdgpu_sriov_vf(smu->adev)) { > > + if (!skip_display_settings) { > > + ret = smu_notify_smc_dispaly_config(smu); > > + if (ret) { > > + pr_err("Failed to notify smc display config!"); > > + return ret; > > + } > > } > > } > > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c > > index cc57fb953e62..253860d30b20 100644 > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c > > @@ -81,8 +81,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) > > adev = hwmgr->adev; > > > > /* Skip for suspend/resume case */ > > - if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev) > > - && adev->in_suspend) { > > + if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr) > > + && !amdgpu_passthrough(adev) && adev->in_suspend) { > > pr_info("dpm has been enabled\n"); > > return 0; > > } > > @@ -200,6 +200,9 @@ int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr) > > { > > PHM_FUNC_CHECK(hwmgr); > > > > + if (!hwmgr->not_vf) > > + return 0; > > + > > if (hwmgr->hwmgr_func->stop_thermal_controller == NULL) > > return -EINVAL; > > > > @@ -237,6 +240,9 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr) > > TEMP_RANGE_MAX}; > > struct amdgpu_device *adev = hwmgr->adev; > > > > + if (!hwmgr->not_vf) > > + return 0; > > + > > if (hwmgr->hwmgr_func->get_thermal_temperature_range) > > hwmgr->hwmgr_func->get_thermal_temperature_range( > > hwmgr, &range); > > @@ -263,6 +269,8 @@ int phm_start_thermal_controller(struct pp_hwmgr *hwmgr) > > bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) > > { > > PHM_FUNC_CHECK(hwmgr); > > + if (hwmgr->pp_one_vf) > > + return false; > > > > if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL) > > return false; > > @@ -482,6 +490,9 @@ int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr) > > { > > PHM_FUNC_CHECK(hwmgr); > > > > + if (!hwmgr->not_vf) > > + return 0; > > + > > if 
(hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL) > > return -EINVAL; > > > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c > > index d2909c91d65b..f8c81f963649 100644 > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c > > @@ -221,6 +221,9 @@ int hwmgr_hw_init(struct pp_hwmgr *hwmgr) > > { > > int ret = 0; > > > > + hwmgr->pp_one_vf = amdgim_is_pp_one_vf((struct amdgpu_device *)hwmgr->adev); > > + hwmgr->pm_en = (amdgpu_dpm && (hwmgr->not_vf || hwmgr->pp_one_vf)) > > + ? true : false; > > if (!hwmgr->pm_en) > > return 0; > > > > @@ -365,6 +368,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, > > > > switch (task_id) { > > case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: > > + if (!hwmgr->not_vf) > > + return ret; > > ret = phm_pre_display_configuration_changed(hwmgr); > > if (ret) > > return ret; > > @@ -381,6 +386,8 @@ int hwmgr_handle_task(struct pp_hwmgr *hwmgr, enum amd_pp_task task_id, > > enum PP_StateUILabel requested_ui_label; > > struct pp_power_state *requested_ps = NULL; > > > > + if (!hwmgr->not_vf) > > + return ret; > > if (user_state == NULL) { > > ret = -EINVAL; > > break; > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c > > index 6bf48934fdc4..31a32a79cfc2 100644 > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c > > @@ -262,20 +262,22 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set > > uint32_t index; > > long workload; > > > > - if (!skip_display_settings) > > - phm_display_configuration_changed(hwmgr); > > - > > - if (hwmgr->ps) > > - power_state_management(hwmgr, new_ps); > > - else > > - /* > > - * for vega12/vega20 which does not support power state manager > > - * DAL clock limits should also be honoured > > - */ > > - phm_apply_clock_adjust_rules(hwmgr); > > - > > - if (!skip_display_settings) > > - phm_notify_smc_display_config_after_ps_adjustment(hwmgr); > > + if (hwmgr->not_vf) { > > + if (!skip_display_settings) > > + phm_display_configuration_changed(hwmgr); > > + > > + if (hwmgr->ps) > > + power_state_management(hwmgr, new_ps); > > + else > > + /* > > + * for vega12/vega20 which does not support power state manager > > + * DAL clock limits should also be honoured > > + */ > > + phm_apply_clock_adjust_rules(hwmgr); > > + > > + if (!skip_display_settings) > > + phm_notify_smc_display_config_after_ps_adjustment(hwmgr); > > + } > > > > if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level)) > > hwmgr->dpm_level = hwmgr->request_dpm_level; > > diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c > > index b29e996df1d4..41a9b8699001 100644 > > --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c > > +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c > > @@ -912,6 +912,9 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) > > hwmgr->platform_descriptor.clockStep.memoryClock = 500; > > > > data->total_active_cus = adev->gfx.cu_info.number; > > + if (!hwmgr->not_vf) > > + return result; > > + > > /* Setup default Overdrive Fan control settings */ > > data->odn_fan_table.target_fan_speed = > > hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM; > > @@ -979,6 +982,9 @@ static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr) > > > > static int vega10_setup_asic_task(struct 
pp_hwmgr *hwmgr) > > { > > + if (!hwmgr->not_vf) > > + return 0; > > + > > PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr), > > "Failed to init sclk threshold!", > > return -EINVAL); > > @@ -2503,6 +2509,9 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr) > > "Failed to setup default DPM tables!", > > return result); > > > > + if (!hwmgr->not_vf) > > + return 0; > > + > > /* initialize ODN table */ > > if (hwmgr->od_enabled) { > > if (odn_table->max_vddc) { > > @@ -2932,61 +2941,75 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) > > struct vega10_hwmgr *data = hwmgr->backend; > > int tmp_result, result = 0; > > > > - vega10_enable_disable_PCC_limit_feature(hwmgr, true); > > - > > - smum_send_msg_to_smc_with_parameter(hwmgr, > > - PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); > > + if (hwmgr->not_vf) { > > + vega10_enable_disable_PCC_limit_feature(hwmgr, true); > > > > - tmp_result = vega10_construct_voltage_tables(hwmgr); > > - PP_ASSERT_WITH_CODE(!tmp_result, > > - "Failed to construct voltage tables!", > > - result = tmp_result); > > + smum_send_msg_to_smc_with_parameter(hwmgr, > > + PPSMC_MSG_ConfigureTelemetry, data->config_telemetry); > > > > - tmp_result = vega10_init_smc_table(hwmgr); > > - PP_ASSERT_WITH_CODE(!tmp_result, > > - "Failed to initialize SMC table!", > > - result = tmp_result); > > + tmp_result = vega10_construct_voltage_tables(hwmgr); > > + PP_ASSERT_WITH_CODE(!tmp_result, > > + "Failed to construct voltage tables!", > > + result = tmp_result); > > + } > > > > - if (PP_CAP(PHM_PlatformCaps_ThermalController)) { > > - tmp_result = vega10_enable_thermal_protection(hwmgr); > > + if (hwmgr->not_vf || hwmgr->pp_one_vf) { > > + tmp_result = vega10_init_smc_table(hwmgr); > > PP_ASSERT_WITH_CODE(!tmp_result, > > - "Failed to enable thermal protection!", > > - result = tmp_result); > > + "Failed to initialize SMC table!", > > + result = tmp_result); > > } > > > > - tmp_result = vega10_enable_vrhot_feature(hwmgr); > > - PP_ASSERT_WITH_CODE(!tmp_result, > > - "Failed to enable VR hot feature!", > > - result = tmp_result); > > + if (hwmgr->not_vf) { > > + if (PP_CAP(PHM_PlatformCaps_ThermalController)) { > > + tmp_result = vega10_enable_thermal_protection(hwmgr); > > + PP_ASSERT_WITH_CODE(!tmp_result, > > + "Failed to enable thermal protection!", > > + result = tmp_result); > > + } > > > > - tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); > > - PP_ASSERT_WITH_CODE(!tmp_result, > > - "Failed to enable deep sleep master switch!", > > - result = tmp_result); > > + tmp_result = vega10_enable_vrhot_feature(hwmgr); > > + PP_ASSERT_WITH_CODE(!tmp_result, > > + "Failed to enable VR hot feature!", > > + result = tmp_result); > > > > - tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); > > - PP_ASSERT_WITH_CODE(!tmp_result, > > - "Failed to start DPM!", result = tmp_result); > > + tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr); > > + PP_ASSERT_WITH_CODE(!tmp_result, > > + "Failed to enable deep sleep master switch!", > > + result = tmp_result); > > + } > > > > - /* enable didt, do not abort if failed didt */ > > - tmp_result = vega10_enable_didt_config(hwmgr); > > - PP_ASSERT(!tmp_result, > > - "Failed to enable didt config!"); > > + if (hwmgr->not_vf) { > > + tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES); > > + PP_ASSERT_WITH_CODE(!tmp_result, > > + "Failed to start DPM!", result = tmp_result); > > + } > > > > - tmp_result = vega10_enable_power_containment(hwmgr); > > - PP_ASSERT_WITH_CODE(!tmp_result, > > - 
"Failed to enable power containment!", > > - result = tmp_result); > > + if (hwmgr->not_vf) { > > + /* enable didt, do not abort if failed didt */ > > + tmp_result = vega10_enable_didt_config(hwmgr); > > + PP_ASSERT(!tmp_result, > > + "Failed to enable didt config!"); > > + } > > > > - tmp_result = vega10_power_control_set_level(hwmgr); > > - PP_ASSERT_WITH_CODE(!tmp_result, > > - "Failed to power control set level!", > > - result = tmp_result); > > + if (hwmgr->not_vf) { > > + tmp_result = vega10_enable_power_containment(hwmgr); > > + PP_ASSERT_WITH_CODE(!tmp_result, > > + "Failed to enable power containment!", > > + result = tmp_result); > > + } > > > > - tmp_result = vega10_enable_ulv(hwmgr); > > - PP_ASSERT_WITH_CODE(!tmp_result, > > - "Failed to enable ULV!", > > - result = tmp_result); > > + if (hwmgr->not_vf) { > > + tmp_result = vega10_power_control_set_level(hwmgr); > > + PP_ASSERT_WITH_CODE(!tmp_result, > > + "Failed to power control set level!", > > + result = tmp_result); > > + > > + tmp_result = vega10_enable_ulv(hwmgr); > > + PP_ASSERT_WITH_CODE(!tmp_result, > > + "Failed to enable ULV!", > > + result = tmp_result); > > + } > > > > return result; > > } > > @@ -3080,11 +3103,22 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, > > performance_level->soc_clock = socclk_dep_table->entries > > [state_entry->ucSocClockIndexHigh].ulClk; > > if (gfxclk_dep_table->ucRevId == 0) { > > - performance_level->gfx_clock = gfxclk_dep_table->entries > > - [state_entry->ucGfxClockIndexHigh].ulClk; > > + /* under vega10 pp one vf mode, the gfx clk dpm need be lower > > + * to level-4 due to the limited 110w-power > > + */ > > + if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0)) > > + performance_level->gfx_clock = > > + gfxclk_dep_table->entries[4].ulClk; > > + else > > + performance_level->gfx_clock = gfxclk_dep_table->entries > > + [state_entry->ucGfxClockIndexHigh].ulClk; > > } else if (gfxclk_dep_table->ucRevId == 1) { > > patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries; > > - performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk; > > + if (hwmgr->pp_one_vf && (state_entry->ucGfxClockIndexHigh > 0)) > > + performance_level->gfx_clock = patom_record_V2[4].ulClk; > > + else > > + performance_level->gfx_clock = > > + patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk; > > } > > > > performance_level->mem_clock = mclk_dep_table->entries > > @@ -3495,6 +3529,7 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) > > smum_send_msg_to_smc_with_parameter(hwmgr, > > PPSMC_MSG_SetSoftMinGfxclkByIndex, > > data->smc_state_table.gfx_boot_level); > > + > > data->dpm_table.gfx_table.dpm_state.soft_min_level = > > data->smc_state_table.gfx_boot_level; > > } > > @@ -3518,6 +3553,9 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr) > > } > > } > > > > + if (!hwmgr->not_vf) > > + return 0; > > + > > if (!data->registry_data.socclk_dpm_key_disabled) { > > if (data->smc_state_table.soc_boot_level != > > data->dpm_table.soc_table.dpm_state.soft_min_level) { > > @@ -3560,6 +3598,9 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr) > > } > > } > > > > + if (!hwmgr->not_vf) > > + return 0; > > + > > if (!data->registry_data.socclk_dpm_key_disabled) { > > if (data->smc_state_table.soc_max_level != > > data->dpm_table.soc_table.dpm_state.soft_max_level) { > > @@ -4054,15 +4095,25 @@ static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum 
amd_dpm_fo > > } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { > > *mclk_mask = 0; > > } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { > > - *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; > > + /* under vega10 pp one vf mode, the gfx clk dpm needs to be lowered > > + * to level-4 due to the limited power > > + */ > > + if (hwmgr->pp_one_vf) > > + *sclk_mask = 4; > > + else > > + *sclk_mask = table_info->vdd_dep_on_sclk->count - 1; > > *soc_mask = table_info->vdd_dep_on_socclk->count - 1; > > *mclk_mask = table_info->vdd_dep_on_mclk->count - 1; > > } > > + > > return 0; > > } > > > > static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode) > > { > > + if (!hwmgr->not_vf) > > + return; > > + > > switch (mode) { > > case AMD_FAN_CTRL_NONE: > > vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100); > > @@ -4176,6 +4227,9 @@ static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, > > break; > > } > > > > + if (!hwmgr->not_vf) > > + return ret; > > + > > if (!ret) { > > if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) > > vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE); > > @@ -4480,7 +4534,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, > > struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table); > > struct vega10_odn_clock_voltage_dependency_table *podn_vdd_dep = NULL; > > > > - int i, now, size = 0; > > + int i, now, size, count = 0; > > > > switch (type) { > > case PP_SCLK: > > @@ -4490,7 +4544,12 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, > > smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex); > > now = smum_get_argument(hwmgr); > > > > - for (i = 0; i < sclk_table->count; i++) > > + if (hwmgr->pp_one_vf && > > + (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) > > + count = 5; > > + else > > + count = sclk_table->count; > > + for (i = 0; i < count; i++) > > Would it be better to just limit sclk_table->count directly for the > SR-IOV case, or do you still want all the states available for regular > dpm? > [yttao]: it is a workaround due to the 110w power limit for pp one vf. Performance tools like RGP will check whether the peak dpm level is constant. > Under peak mode the max dpm level should be 5, but under the highest mode it should be 7, though it will be clocked down after a while > due to the 110w power limit. > > > > size += sprintf(buf + size, "%d: %uMhz %s\n", > > i, sclk_table->dpm_levels[i].value / 100, > > (i == now) ? "*" : ""); > > diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h > > index af977675fd33..2ffb666b97e6 100644 > > --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h > > +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h > > @@ -741,6 +741,7 @@ struct pp_hwmgr { > > uint32_t smu_version; > > bool not_vf; > > bool pm_en; > > + bool pp_one_vf; > > struct mutex smu_lock; > > > > uint32_t pp_table_version; > > diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c > > index 742b3dc1f6cb..a8578a27cab2 100644 > > --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c > > +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c > > @@ -61,15 +61,29 @@ static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr) > > uint32_t reg; > > uint32_t ret; > > > > - reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); > > + /* Due to the L1 policy problem under SRIOV, we have to use > > + * mmMP1_SMN_C2PMSG_103 as the driver response register > > + */ > > + if (hwmgr->pp_one_vf) { > > + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103); > > > > - ret = phm_wait_for_register_unequal(hwmgr, reg, > > - 0, MP1_C2PMSG_90__CONTENT_MASK); > > + ret = phm_wait_for_register_unequal(hwmgr, reg, > > + 0, MP1_C2PMSG_103__CONTENT_MASK); > > > > - if (ret) > > - pr_err("No response from smu\n"); > > + if (ret) > > + pr_err("No response from smu\n"); > > > > - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); > > + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103); > > + } else { > > + reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90); > > + > > + ret = phm_wait_for_register_unequal(hwmgr, reg, > > + 0, MP1_C2PMSG_90__CONTENT_MASK); > > + > > + if (ret) > > + pr_err("No response from smu\n"); > > + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90); > > + } > > } > > > > /* > > @@ -83,7 +97,10 @@ static int smu9_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, > > { > > struct amdgpu_device *adev = hwmgr->adev; > > > > - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); > > + if (hwmgr->pp_one_vf) { > > + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_101, msg); > > + } else > > + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg); > > Please fix up the parens here to be consistent. > > Thanks, > > Alex > > > > return 0; > > } > > @@ -101,7 +118,10 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) > > > > smu9_wait_for_response(hwmgr); > > > > - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); > > + if (hwmgr->pp_one_vf) > > + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0); > > + else > > + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); > > > > smu9_send_msg_to_smc_without_waiting(hwmgr, msg); > > > > @@ -127,9 +147,17 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, > > > > smu9_wait_for_response(hwmgr); > > > > - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); > > - > > - WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); > > + /* Due to the L1 policy problem under SRIOV, we have to use > > + * mmMP1_SMN_C2PMSG_101 as the driver message register and > > + * mmMP1_SMN_C2PMSG_102 as the driver parameter register. > > + */ > > + if (hwmgr->pp_one_vf) { > > + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0); > > + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter); > > + } else { > > + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0); > > + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, parameter); > > + } > > > > smu9_send_msg_to_smc_without_waiting(hwmgr, msg); > > > > @@ -144,5 +172,8 @@ uint32_t smu9_get_argument(struct pp_hwmgr *hwmgr) > > { > > struct amdgpu_device *adev = hwmgr->adev; > > > > - return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); > > + if (hwmgr->pp_one_vf) > > + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102); > > + else > > + return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); > > }
> > diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c > > index 0f3836fd9666..bea337a6c5b7 100644 > > --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c > > +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c > > @@ -71,6 +71,12 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr, > > { > > struct vega10_smumgr *priv = hwmgr->smu_backend; > > > > + /* under pp_one_vf mode, vbios or hypervisor driver > > + * has already copy table to smc so here only skip it > > + */ > > + if (!hwmgr->not_vf && !hwmgr->pp_one_vf) > > + return 0; > > + > > PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, > > "Invalid SMU Table ID!", return -EINVAL); > > PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].version != 0, > > -- > > 2.17.1 _______________________________________________ amd-gfx mailing list amd-gfx@xxxxxxxxxxxxxxxxxxxxx https://lists.freedesktop.org/mailman/listinfo/amd-gfx