Correct the workload setting in order not to mix the setting with the end user. Update the workload mask accordingly. v2: changes as below: 1. the end user cannot erase the workload from driver except default workload. 2. always shows the real highest priority workload to the end user. 3. the real workload mask is combined with driver workload mask and end user workload mask. v3: apply this to the other ASICs as well. Signed-off-by: Kenneth Feng <kenneth.feng@xxxxxxx> --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 43 +++++++++++++------ drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 5 ++- .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 19 +++++++- .../gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 23 +++++++++- .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 23 +++++++++- .../gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 20 ++++++++- .../gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 20 ++++++++- .../drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c | 31 +++++++++++-- .../drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 25 +++++++++-- .../drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c | 28 +++++++++--- 10 files changed, 202 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 8d4aee4e2287..7d7bf44c4496 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1261,26 +1261,31 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) smu->watermarks_bitmap = 0; smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + smu->user_dpm_profile.user_workload_mask = 0; + smu->user_dpm_profile.prev_user_workload_mask = 0; atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); - smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 
0; - smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; - smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; - smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3; - smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4; - smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; - smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; + smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; + smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; + smu->workload_priority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2; + smu->workload_priority[PP_SMC_POWER_PROFILE_VIDEO] = 3; + smu->workload_priority[PP_SMC_POWER_PROFILE_VR] = 4; + smu->workload_priority[PP_SMC_POWER_PROFILE_COMPUTE] = 5; + smu->workload_priority[PP_SMC_POWER_PROFILE_CUSTOM] = 6; if (smu->is_apu || - !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; - else - smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + !smu_is_workload_profile_available(smu, PP_SMC_POWER_PROFILE_FULLSCREEN3D)) { + smu->workload_mask = 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; + } else { + smu->workload_mask = 1 << smu->workload_priority[PP_SMC_POWER_PROFILE_FULLSCREEN3D]; + smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D; + } + smu->driver_workload_mask = smu->workload_mask; smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D; smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING; @@ -2354,12 +2359,14 @@ static int smu_switch_power_profile(void *handle, return -EINVAL; if (!en) { - smu->workload_mask &= ~(1 << smu->workload_prority[type]); + smu->workload_mask &= ~(1 << smu->workload_priority[type]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index > 0 && index <= 
WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; } else { - smu->workload_mask |= (1 << smu->workload_prority[type]); + smu->workload_mask |= (1 << smu->workload_priority[type]); + smu->driver_workload_mask |= (1 << smu->workload_priority[type]); index = fls(smu->workload_mask); index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; workload[0] = smu->workload_setting[index]; @@ -3054,12 +3061,20 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; + int ret; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - return smu_bump_power_profile_mode(smu, param, param_size); + smu->user_dpm_profile.prev_user_workload_mask = + smu->user_dpm_profile.user_workload_mask; + smu->user_dpm_profile.user_workload_mask = (1 << smu->workload_priority[param[param_size]]); + ret = smu_bump_power_profile_mode(smu, param, param_size); + smu->user_dpm_profile.prev_user_workload_mask = + smu->user_dpm_profile.user_workload_mask; + + return ret; } static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 8bb32b3f0d9c..88294d986b36 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -240,6 +240,8 @@ struct smu_user_dpm_profile { /* user clock state information */ uint32_t clk_mask[SMU_CLK_COUNT]; uint32_t clk_dependency; + uint32_t user_workload_mask; + uint32_t prev_user_workload_mask; }; #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ @@ -557,7 +559,8 @@ struct smu_context { bool disable_uclk_switch; uint32_t workload_mask; - uint32_t workload_prority[WORKLOAD_POLICY_MAX]; + uint32_t driver_workload_mask; + uint32_t workload_priority[WORKLOAD_POLICY_MAX]; uint32_t workload_setting[WORKLOAD_POLICY_MAX]; uint32_t power_profile_mode; uint32_t 
default_power_profile_mode; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 5ad09323a29d..c0ae36e7c896 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1449,12 +1449,24 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, int workload_type = 0; uint32_t profile_mode = input[size]; int ret = 0; + uint32_t index; if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); return -EINVAL; } + if (smu->user_dpm_profile.prev_user_workload_mask != + smu->user_dpm_profile.user_workload_mask) { + if (smu->workload_mask & smu->user_dpm_profile.prev_user_workload_mask && + !(smu->driver_workload_mask & smu->user_dpm_profile.prev_user_workload_mask)) + smu->workload_mask &= ~smu->user_dpm_profile.prev_user_workload_mask; + + if (input[size] != smu->default_power_profile_mode) { + smu->workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + } + } if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && (smu->smc_fw_version >= 0x360d00)) { @@ -1521,16 +1533,19 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu, return -EINVAL; } + smu->workload_mask |= (1 << workload_type); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 9fa305ba6422..e929b882ce60 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2008,9 +2008,22 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u { DpmActivityMonitorCoeffInt_t activity_monitor; int workload_type, ret = 0; + uint32_t index; smu->power_profile_mode = input[size]; + if (smu->user_dpm_profile.prev_user_workload_mask != + smu->user_dpm_profile.user_workload_mask) { + if (smu->workload_mask & smu->user_dpm_profile.prev_user_workload_mask && + !(smu->driver_workload_mask & smu->user_dpm_profile.prev_user_workload_mask)) + smu->workload_mask &= ~smu->user_dpm_profile.prev_user_workload_mask; + + if (input[size] != smu->default_power_profile_mode) { + smu->workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + } + } + if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); return -EINVAL; @@ -2081,11 +2094,19 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + + smu->workload_mask |= (1 << workload_type); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + if (!ret) { + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; + } + return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 77e58eb46328..9f6548039948 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1713,9 +1713,22 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); int workload_type, ret = 0; + uint32_t index; smu->power_profile_mode = input[size]; + if (smu->user_dpm_profile.prev_user_workload_mask != + smu->user_dpm_profile.user_workload_mask) { + if (smu->workload_mask & smu->user_dpm_profile.prev_user_workload_mask && + !(smu->driver_workload_mask & smu->user_dpm_profile.prev_user_workload_mask)) + smu->workload_mask &= ~smu->user_dpm_profile.prev_user_workload_mask; + + if (input[size] != smu->default_power_profile_mode) { + smu->workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + } + } + if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); return -EINVAL; @@ -1786,11 +1799,19 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long * smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + + smu->workload_mask |= (1 << workload_type); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); + if (!ret) { + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; + } + return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 6c43724c01dd..ba8bf369d6d7 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -1058,12 +1058,25 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, { int workload_type, ret; uint32_t profile_mode = input[size]; + uint32_t index; if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); return -EINVAL; } + if (smu->user_dpm_profile.prev_user_workload_mask != + smu->user_dpm_profile.user_workload_mask) { + if (smu->workload_mask & smu->user_dpm_profile.prev_user_workload_mask && + !(smu->driver_workload_mask & smu->user_dpm_profile.prev_user_workload_mask)) + smu->workload_mask &= ~smu->user_dpm_profile.prev_user_workload_mask; + + if (input[size] != smu->default_power_profile_mode) { + smu->workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + } + } + if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) return 0; @@ -1078,8 +1091,9 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, return -EINVAL; } + smu->workload_mask |= (1 << workload_type); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", @@ -1087,7 +1101,9 @@ static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, return ret; } - smu->power_profile_mode = profile_mode; + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 0b210b1f2628..6a18cb9b6e59 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -866,12 +866,25 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u { int workload_type, ret; uint32_t profile_mode = input[size]; + uint32_t index; if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode); return -EINVAL; } + if (smu->user_dpm_profile.prev_user_workload_mask != + smu->user_dpm_profile.user_workload_mask) { + if (smu->workload_mask & smu->user_dpm_profile.prev_user_workload_mask && + !(smu->driver_workload_mask & smu->user_dpm_profile.prev_user_workload_mask)) + smu->workload_mask &= ~smu->user_dpm_profile.prev_user_workload_mask; + + if (input[size] != smu->default_power_profile_mode) { + smu->workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + } + } + if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT || profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) return 0; @@ -889,15 +902,18 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u return -EINVAL; } + smu->workload_mask |= (1 << workload_type); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify, - 1 << workload_type, + smu->workload_mask, NULL); if (ret) { dev_err_once(smu->adev->dev, "Fail to set workload type %d\n", workload_type); return ret; } - smu->power_profile_mode = profile_mode; + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 8d25cc1f218f..8c26cac6cf18 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -2474,9 +2474,22 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, &(activity_monitor_external.DpmActivityMonitorCoeffInt); int workload_type, ret = 0; u32 workload_mask, selected_workload_mask; + uint32_t index; smu->power_profile_mode = input[size]; + if (smu->user_dpm_profile.prev_user_workload_mask != + smu->user_dpm_profile.user_workload_mask) { + if (smu->workload_mask & smu->user_dpm_profile.prev_user_workload_mask && + !(smu->driver_workload_mask & smu->user_dpm_profile.prev_user_workload_mask)) + smu->workload_mask &= ~smu->user_dpm_profile.prev_user_workload_mask; + + if (input[size] != smu->default_power_profile_mode) { + smu->workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + } + } + if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); return -EINVAL; @@ -2555,12 +2568,24 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, workload_mask |= 1 << workload_type; } + smu->workload_mask |= workload_mask; ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - workload_mask, + smu->workload_mask, NULL); - if (!ret) - smu->workload_mask = selected_workload_mask; + if (!ret) { + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? 
index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; + if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING) { + workload_type = smu_cmn_to_asic_specific_index(smu, + CMN2ASIC_MAPPING_WORKLOAD, + PP_SMC_POWER_PROFILE_FULLSCREEN3D); + smu->power_profile_mode = smu->workload_mask & (1 << workload_type) + ? PP_SMC_POWER_PROFILE_FULLSCREEN3D + : PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + } + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index 23f13388455f..2323c74ee50b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -2429,9 +2429,22 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); int workload_type, ret = 0; + uint32_t index; smu->power_profile_mode = input[size]; + if (smu->user_dpm_profile.prev_user_workload_mask != + smu->user_dpm_profile.user_workload_mask) { + if (smu->workload_mask & smu->user_dpm_profile.prev_user_workload_mask && + !(smu->driver_workload_mask & smu->user_dpm_profile.prev_user_workload_mask)) + smu->workload_mask &= ~smu->user_dpm_profile.prev_user_workload_mask; + + if (input[size] != smu->default_power_profile_mode) { + smu->workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + } + } + if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_WINDOW3D) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); return -EINVAL; @@ -2487,13 +2500,19 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp smu->power_profile_mode); if (workload_type < 0) return -EINVAL; + + smu->workload_mask |= (1 << workload_type); ret = 
smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, - 1 << workload_type, NULL); + smu->workload_mask, NULL); if (ret) dev_err(smu->adev->dev, "[%s] Failed to set work load mask!", __func__); - else - smu->workload_mask = (1 << workload_type); + + if (!ret) { + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; + } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index cefe10b95d8e..c2fd47f04e2d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -1787,9 +1787,22 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, DpmActivityMonitorCoeffInt_t *activity_monitor = &(activity_monitor_external.DpmActivityMonitorCoeffInt); int workload_type, ret = 0; + uint32_t index; uint32_t current_profile_mode = smu->power_profile_mode; smu->power_profile_mode = input[size]; + if (smu->user_dpm_profile.prev_user_workload_mask != + smu->user_dpm_profile.user_workload_mask) { + if (smu->workload_mask & smu->user_dpm_profile.prev_user_workload_mask && + !(smu->driver_workload_mask & smu->user_dpm_profile.prev_user_workload_mask)) + smu->workload_mask &= ~smu->user_dpm_profile.prev_user_workload_mask; + + if (input[size] != smu->default_power_profile_mode) { + smu->workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + smu->driver_workload_mask &= ~(1 << smu->workload_priority[smu->default_power_profile_mode]); + } + } + if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) { dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode); return -EINVAL; @@ -1857,12 +1870,15 @@ static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu, if (workload_type < 0) return -EINVAL; - ret = smu_cmn_send_smc_msg_with_param(smu, - 
SMU_MSG_SetWorkloadMask, - 1 << workload_type, - NULL); - if (!ret) - smu->workload_mask = 1 << workload_type; + smu->workload_mask |= (1 << workload_type); + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + smu->workload_mask, NULL); + + if (!ret) { + index = fls(smu->workload_mask); + index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0; + smu->power_profile_mode = smu->workload_setting[index]; + } return ret; } -- 2.34.1