The allow_xgmi_power_down(true/false) interface is generally replaced by:
 - allow:    select_xgmi_plpd_policy(XGMI_PLPD_DEFAULT)
 - disallow: select_xgmi_plpd_policy(XGMI_PLPD_DISALLOW)

Signed-off-by: Le Ma <le.ma@xxxxxxx>
---
 .../gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 21 +++++++++--------
 .../drm/amd/pm/swsmu/smu13/aldebaran_ppt.c    | 23 ++++++++++++-------
 .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c  | 22 ++++++++++--------
 3 files changed, 38 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 080140a0f673..6e2e665ad383 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -2227,7 +2227,8 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
-static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
+static int arcturus_select_xgmi_plpd_policy(struct smu_context *smu,
+					    enum pp_xgmi_plpd_mode mode)
 {
 	uint32_t smu_version;
 	int ret;
@@ -2244,16 +2245,16 @@ static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
 		return -EINVAL;
 	}
 
-	if (en)
+	if (mode == XGMI_PLPD_DEFAULT)
 		return smu_cmn_send_smc_msg_with_param(smu,
 						       SMU_MSG_GmiPwrDnControl,
-						       1,
-						       NULL);
-
-	return smu_cmn_send_smc_msg_with_param(smu,
-					       SMU_MSG_GmiPwrDnControl,
-					       0,
-					       NULL);
+						       1, NULL);
+	else if (mode == XGMI_PLPD_DISALLOW)
+		return smu_cmn_send_smc_msg_with_param(smu,
+						       SMU_MSG_GmiPwrDnControl,
+						       0, NULL);
+	else
+		return -EINVAL;
 }
 
 static const struct throttling_logging_label {
@@ -2455,7 +2456,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
 	.get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq,
 	.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
 	.set_df_cstate = arcturus_set_df_cstate,
-	.allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
+	.select_xgmi_plpd_policy = arcturus_select_xgmi_plpd_policy,
 	.log_thermal_throttling_event = arcturus_log_thermal_throttling_event,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 23820204efd7..b57184a3e24f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1604,20 +1604,27 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
 	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
 }
 
-static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
+static int aldebaran_select_xgmi_plpd_policy(struct smu_context *smu,
+					     enum pp_xgmi_plpd_mode mode)
 {
 	struct amdgpu_device *adev = smu->adev;
 
 	/* The message only works on master die and NACK will be sent
 	   back for other dies, only send it on master die */
-	if (!adev->smuio.funcs->get_socket_id(adev) &&
-	    !adev->smuio.funcs->get_die_id(adev))
+	if (adev->smuio.funcs->get_socket_id(adev) ||
+	    adev->smuio.funcs->get_die_id(adev))
+		return 0;
+
+	if (mode == XGMI_PLPD_DEFAULT)
+		return smu_cmn_send_smc_msg_with_param(smu,
+						       SMU_MSG_GmiPwrDnControl,
+						       0, NULL);
+	else if (mode == XGMI_PLPD_DISALLOW)
 		return smu_cmn_send_smc_msg_with_param(smu,
-						       SMU_MSG_GmiPwrDnControl,
-						       en ? 0 : 1,
-						       NULL);
+						       SMU_MSG_GmiPwrDnControl,
+						       1, NULL);
 	else
-		return 0;
+		return -EINVAL;
 }
 
 static const struct throttling_logging_label {
@@ -2072,7 +2079,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
 	.set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range,
 	.od_edit_dpm_table = aldebaran_usr_edit_dpm_table,
 	.set_df_cstate = aldebaran_set_df_cstate,
-	.allow_xgmi_power_down = aldebaran_allow_xgmi_power_down,
+	.select_xgmi_plpd_policy = aldebaran_select_xgmi_plpd_policy,
 	.log_thermal_throttling_event = aldebaran_log_thermal_throttling_event,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
 	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index b137c37903fc..c20537fb9df0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -1897,12 +1897,6 @@ static int smu_v13_0_6_set_df_cstate(struct smu_context *smu,
 				     state, NULL);
 }
 
-static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
-{
-	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
-					       en ? 0 : 1, NULL);
-}
-
 static const char *const throttling_logging_label[] = {
 	[THROTTLER_PROCHOT_BIT] = "Prochot",
 	[THROTTLER_PPT_BIT] = "PPT",
@@ -2730,13 +2724,22 @@ static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
 	case XGMI_PLPD_OPTIMIZED:
 		param = PPSMC_PLPD_MODE_OPTIMIZED;
 		break;
+	case XGMI_PLPD_DISALLOW:
+		param = 0;
+		break;
 	default:
 		return -EINVAL;
 	}
 
-	/* change xgmi per-link power down policy */
-	ret = smu_cmn_send_smc_msg_with_param(
-		smu, SMU_MSG_SelectPLPDMode, param, NULL);
+	if (mode == XGMI_PLPD_DISALLOW)
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+						      SMU_MSG_GmiPwrDnControl,
+						      param, NULL);
+	else
+		/* change xgmi per-link power down policy */
+		ret = smu_cmn_send_smc_msg_with_param(smu,
+						      SMU_MSG_SelectPLPDMode,
+						      param, NULL);
 
 	if (ret)
 		dev_err(adev->dev,
@@ -2785,7 +2788,6 @@ static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
 	.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
 	.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
 	.set_df_cstate = smu_v13_0_6_set_df_cstate,
-	.allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
 	.select_xgmi_plpd_policy = smu_v13_0_6_select_xgmi_plpd_policy,
 	.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
 	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
-- 
2.38.1
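
For illustration only (not part of this patch): the allow/disallow mapping from the
commit message could be expressed as a small wrapper over the new
.select_xgmi_plpd_policy callback. This is a sketch; the helper name
smu_select_xgmi_plpd_from_bool() is hypothetical and not part of this series.

static int smu_select_xgmi_plpd_from_bool(struct smu_context *smu, bool allow)
{
	/* allow -> default per-link power down policy, disallow -> PLPD off */
	enum pp_xgmi_plpd_mode mode =
		allow ? XGMI_PLPD_DEFAULT : XGMI_PLPD_DISALLOW;

	if (!smu->ppt_funcs || !smu->ppt_funcs->select_xgmi_plpd_policy)
		return -EOPNOTSUPP;

	return smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode);
}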