RE: [PATCH v3] drm/amd/pm: Fix smu v13.0.6 caps initialization

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



[AMD Official Use Only - AMD Internal Distribution Only]

Reviewed-by: Yang Wang <kevinyang.wang@xxxxxxx>

Best Regards,
Kevin

-----Original Message-----
From: Lazar, Lijo <Lijo.Lazar@xxxxxxx>
Sent: Tuesday, January 21, 2025 13:39
To: amd-gfx@xxxxxxxxxxxxxxxxxxxxx
Cc: Zhang, Hawking <Hawking.Zhang@xxxxxxx>; Deucher, Alexander <Alexander.Deucher@xxxxxxx>; Kamal, Asad <Asad.Kamal@xxxxxxx>; Wang, Yang(Kevin) <KevinYang.Wang@xxxxxxx>; Deucher, Alexander <Alexander.Deucher@xxxxxxx>
Subject: [PATCH v3] drm/amd/pm: Fix smu v13.0.6 caps initialization

Fix the initialization and usage of SMU v13.0.6 capability values. Use caps_set/clear functions to set/clear capability.

Also, fix the SET_UCLK_MAX capability on APUs; it is supported on APUs.

Signed-off-by: Lijo Lazar <lijo.lazar@xxxxxxx>
Reviewed-by: Alex Deucher <alexander.deucher@xxxxxxx>

Fixes: 9bb53d2ce109 ("drm/amd/pm: Add capability flags for SMU v13.0.6")
---
v1: ("drm/amd/pm: Use correct macros for smu caps")
v2:
        Use caps_set/clear instead of macros (Alex). Commit message changed.
        Use BIT_ULL (Kevin)
        Fix SET_UCLK_MAX capability on APUs
v3:
        Rename to cap to indicate operations on single capability (Alex)
        Use SMU_CAP in enum value definition also for consistency

 .../drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c  | 197 ++++++++++--------
 1 file changed, 108 insertions(+), 89 deletions(-)

diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 56e26fcd3066..d991a5df3796 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -101,26 +101,25 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_14.bin");
 #define MCA_BANK_IPID(_ip, _hwid, _type) \
        [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }

+#define SMU_CAP(x) SMU_13_0_6_CAPS_##x
+
 enum smu_v13_0_6_caps {
-       SMU_13_0_6_CAPS_DPM,
-       SMU_13_0_6_CAPS_UNI_METRICS,
-       SMU_13_0_6_CAPS_DPM_POLICY,
-       SMU_13_0_6_CAPS_OTHER_END_METRICS,
-       SMU_13_0_6_CAPS_SET_UCLK_MAX,
-       SMU_13_0_6_CAPS_PCIE_METRICS,
-       SMU_13_0_6_CAPS_HST_LIMIT_METRICS,
-       SMU_13_0_6_CAPS_MCA_DEBUG_MODE,
-       SMU_13_0_6_CAPS_PER_INST_METRICS,
-       SMU_13_0_6_CAPS_CTF_LIMIT,
-       SMU_13_0_6_CAPS_RMA_MSG,
-       SMU_13_0_6_CAPS_ACA_SYND,
-       SMU_13_0_6_CAPS_SDMA_RESET,
-       SMU_13_0_6_CAPS_ALL,
+       SMU_CAP(DPM),
+       SMU_CAP(UNI_METRICS),
+       SMU_CAP(DPM_POLICY),
+       SMU_CAP(OTHER_END_METRICS),
+       SMU_CAP(SET_UCLK_MAX),
+       SMU_CAP(PCIE_METRICS),
+       SMU_CAP(HST_LIMIT_METRICS),
+       SMU_CAP(MCA_DEBUG_MODE),
+       SMU_CAP(PER_INST_METRICS),
+       SMU_CAP(CTF_LIMIT),
+       SMU_CAP(RMA_MSG),
+       SMU_CAP(ACA_SYND),
+       SMU_CAP(SDMA_RESET),
+       SMU_CAP(ALL),
 };

-#define SMU_CAPS_MASK(x) (ULL(1) << x)
-#define SMU_CAPS(x) SMU_CAPS_MASK(SMU_13_0_6_CAPS_##x)
-
 struct mca_bank_ipid {
        enum amdgpu_mca_ip ip;
        uint16_t hwid;
@@ -284,95 +283,116 @@ struct smu_v13_0_6_dpm_map {
        uint32_t *freq_table;
 };

-static void smu_v13_0_14_init_caps(struct smu_context *smu)
+static inline void smu_v13_0_6_cap_set(struct smu_context *smu,
+                                      enum smu_v13_0_6_caps cap)
+{
+       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+       dpm_context->caps |= BIT_ULL(cap);
+}
+
+static inline void smu_v13_0_6_cap_clear(struct smu_context *smu,
+                                        enum smu_v13_0_6_caps cap)
 {
        struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-       uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
-                       SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
-                       SMU_CAPS(PCIE_METRICS) | SMU_CAPS(CTF_LIMIT) |
-                       SMU_CAPS(MCA_DEBUG_MODE) | SMU_CAPS(RMA_MSG) |
-                       SMU_CAPS(ACA_SYND);
+
+       dpm_context->caps &= ~BIT_ULL(cap);
+}
+
+static inline bool smu_v13_0_6_cap_supported(struct smu_context *smu,
+                                            enum smu_v13_0_6_caps cap)
+{
+       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+
+       return !!(dpm_context->caps & BIT_ULL(cap));
+}
+
+static void smu_v13_0_14_init_caps(struct smu_context *smu)
+{
+       enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+                                                    SMU_CAP(UNI_METRICS),
+                                                    SMU_CAP(SET_UCLK_MAX),
+                                                    SMU_CAP(DPM_POLICY),
+                                                    SMU_CAP(PCIE_METRICS),
+                                                    SMU_CAP(CTF_LIMIT),
+                                                    SMU_CAP(MCA_DEBUG_MODE),
+                                                    SMU_CAP(RMA_MSG),
+                                                    SMU_CAP(ACA_SYND) };
        uint32_t fw_ver = smu->smc_fw_version;

+       for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+               smu_v13_0_6_cap_set(smu, default_cap_list[i]);
+
        if (fw_ver >= 0x05550E00)
-               caps |= SMU_CAPS(OTHER_END_METRICS);
+               smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
        if (fw_ver >= 0x05551000)
-               caps |= SMU_CAPS(HST_LIMIT_METRICS);
+               smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
        if (fw_ver >= 0x05550B00)
-               caps |= SMU_CAPS(PER_INST_METRICS);
+               smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
        if (fw_ver > 0x05550f00)
-               caps |= SMU_CAPS(SDMA_RESET);
-
-       dpm_context->caps = caps;
+               smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
 }

 static void smu_v13_0_6_init_caps(struct smu_context *smu)
 {
-       uint64_t caps = SMU_CAPS(DPM) | SMU_CAPS(UNI_METRICS) |
-                       SMU_CAPS(SET_UCLK_MAX) | SMU_CAPS(DPM_POLICY) |
-                       SMU_CAPS(PCIE_METRICS) | SMU_CAPS(MCA_DEBUG_MODE) |
-                       SMU_CAPS(CTF_LIMIT) | SMU_CAPS(RMA_MSG) |
-                       SMU_CAPS(ACA_SYND);
-       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
+       enum smu_v13_0_6_caps default_cap_list[] = { SMU_CAP(DPM),
+                                                    SMU_CAP(UNI_METRICS),
+                                                    SMU_CAP(SET_UCLK_MAX),
+                                                    SMU_CAP(DPM_POLICY),
+                                                    SMU_CAP(PCIE_METRICS),
+                                                    SMU_CAP(CTF_LIMIT),
+                                                    SMU_CAP(MCA_DEBUG_MODE),
+                                                    SMU_CAP(RMA_MSG),
+                                                    SMU_CAP(ACA_SYND) };
        struct amdgpu_device *adev = smu->adev;
        uint32_t fw_ver = smu->smc_fw_version;
        uint32_t pgm = (fw_ver >> 24) & 0xFF;

+       for (int i = 0; i < ARRAY_SIZE(default_cap_list); i++)
+               smu_v13_0_6_cap_set(smu, default_cap_list[i]);
        if (fw_ver < 0x552F00)
-               caps &= ~SMU_CAPS(DPM);
+               smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM));

        if (adev->flags & AMD_IS_APU) {
-               caps &= ~SMU_CAPS(PCIE_METRICS);
-               caps &= ~SMU_CAPS(SET_UCLK_MAX);
-               caps &= ~SMU_CAPS(DPM_POLICY);
-               caps &= ~SMU_CAPS(RMA_MSG);
-               caps &= ~SMU_CAPS(ACA_SYND);
+               smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
+               smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
+               smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
+               smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));

                if (fw_ver <= 0x4556900)
-                       caps &= ~SMU_CAPS(UNI_METRICS);
-
+                       smu_v13_0_6_cap_clear(smu, SMU_CAP(UNI_METRICS));
                if (fw_ver >= 0x04556F00)
-                       caps |= SMU_CAPS(HST_LIMIT_METRICS);
+                       smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
                if (fw_ver >= 0x04556A00)
-                       caps |= SMU_CAPS(PER_INST_METRICS);
+                       smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
                if (fw_ver < 0x554500)
-                       caps &= ~SMU_CAPS(CTF_LIMIT);
+                       smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT));
        } else {
                if (fw_ver >= 0x557600)
-                       caps |= SMU_CAPS(OTHER_END_METRICS);
+                       smu_v13_0_6_cap_set(smu, SMU_CAP(OTHER_END_METRICS));
                if (fw_ver < 0x00556000)
-                       caps &= ~SMU_CAPS(DPM_POLICY);
+                       smu_v13_0_6_cap_clear(smu, SMU_CAP(DPM_POLICY));
                if (amdgpu_sriov_vf(adev) && (fw_ver < 0x556600))
-                       caps &= ~SMU_CAPS(SET_UCLK_MAX);
+                       smu_v13_0_6_cap_clear(smu, SMU_CAP(SET_UCLK_MAX));
                if (fw_ver < 0x556300)
-                       caps &= ~SMU_CAPS(PCIE_METRICS);
+                       smu_v13_0_6_cap_clear(smu, SMU_CAP(PCIE_METRICS));
                if (fw_ver < 0x554800)
-                       caps &= ~SMU_CAPS(MCA_DEBUG_MODE);
+                       smu_v13_0_6_cap_clear(smu, SMU_CAP(MCA_DEBUG_MODE));
                if (fw_ver >= 0x556F00)
-                       caps |= SMU_CAPS(PER_INST_METRICS);
+                       smu_v13_0_6_cap_set(smu, SMU_CAP(PER_INST_METRICS));
                if (fw_ver < 0x554500)
-                       caps &= ~SMU_CAPS(CTF_LIMIT);
+                       smu_v13_0_6_cap_clear(smu, SMU_CAP(CTF_LIMIT));
                if (fw_ver < 0x00555a00)
-                       caps &= ~SMU_CAPS(RMA_MSG);
+                       smu_v13_0_6_cap_clear(smu, SMU_CAP(RMA_MSG));
                if (fw_ver < 0x00555600)
-                       caps &= ~SMU_CAPS(ACA_SYND);
+                       smu_v13_0_6_cap_clear(smu, SMU_CAP(ACA_SYND));
                if (pgm == 0 && fw_ver >= 0x557900)
-                       caps |= SMU_CAPS(HST_LIMIT_METRICS);
+                       smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS));
        }
        if (((pgm == 7) && (fw_ver > 0x07550700)) ||
            ((pgm == 0) && (fw_ver > 0x00557700)) ||
            ((pgm == 4) && (fw_ver > 0x4556e6c)))
-               caps |= SMU_CAPS(SDMA_RESET);
-
-       dpm_context->caps = caps;
-}
-
-static inline bool smu_v13_0_6_caps_supported(struct smu_context *smu,
-                                             enum smu_v13_0_6_caps caps)
-{
-       struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
-
-       return (dpm_context->caps & SMU_CAPS_MASK(caps)) == SMU_CAPS_MASK(caps);
+               smu_v13_0_6_cap_set(smu, SMU_CAP(SDMA_RESET));
 }

 static void smu_v13_0_x_init_caps(struct smu_context *smu)
@@ -716,7 +736,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
        MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
        struct PPTable_t *pptable =
                (struct PPTable_t *)smu_table->driver_pptable;
-       bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
+       bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
        int ret, i, retry = 100;
        uint32_t table_version;

@@ -912,7 +932,7 @@ static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
        smu_v13_0_6_setup_driver_pptable(smu);

        /* DPM policy not supported in older firmwares */
-       if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM_POLICY))) {
+       if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM_POLICY))) {
                struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

                smu_dpm->dpm_policies->policy_mask &=
@@ -1089,7 +1109,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
        struct smu_table_context *smu_table = &smu->smu_table;
        MetricsTableX_t *metrics_x = (MetricsTableX_t *)smu_table->metrics_table;
        MetricsTableA_t *metrics_a = (MetricsTableA_t *)smu_table->metrics_table;
-       bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
+       bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
        struct amdgpu_device *adev = smu->adev;
        int ret = 0;
        int xcc_id;
@@ -1102,7 +1122,7 @@ static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
        switch (member) {
        case METRICS_CURR_GFXCLK:
        case METRICS_AVERAGE_GFXCLK:
-               if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM))) {
+               if (smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
                        xcc_id = GET_INST(GC, 0);
                        *value = SMUQ10_ROUND(GET_METRIC_FIELD(GfxclkFrequency, flag)[xcc_id]);
                } else {
@@ -1791,7 +1811,7 @@ static int smu_v13_0_6_notify_unload(struct smu_context *smu)
 static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
 {
        /* NOTE: this ClearMcaOnRead message is only supported for smu version 85.72.0 or higher */
-       if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(MCA_DEBUG_MODE)))
+       if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(MCA_DEBUG_MODE)))
                return 0;

        return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
@@ -1936,8 +1956,8 @@ static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
                        if (max == pstate_table->uclk_pstate.curr.max)
                                return 0;
                        /* For VF, only allowed in FW versions 85.102 or greater */
-                       if (!smu_v13_0_6_caps_supported(smu,
-                                                       SMU_CAPS(SET_UCLK_MAX)))
+                       if (!smu_v13_0_6_cap_supported(smu,
+                                                      SMU_CAP(SET_UCLK_MAX)))
                                return -EOPNOTSUPP;
                        /* Only max clock limiting is allowed for UCLK */
                        ret = smu_v13_0_set_soft_freq_limited_range(
@@ -2141,7 +2161,7 @@ static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,

        ret = smu_cmn_get_enabled_mask(smu, feature_mask);

-       if (ret == -EIO && !smu_v13_0_6_caps_supported(smu, SMU_CAPS(DPM))) {
+       if (ret == -EIO && !smu_v13_0_6_cap_supported(smu, SMU_CAP(DPM))) {
                *feature_mask = 0;
                ret = 0;
        }
@@ -2437,7 +2457,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
        struct smu_table_context *smu_table = &smu->smu_table;
        struct gpu_metrics_v1_7 *gpu_metrics =
                (struct gpu_metrics_v1_7 *)smu_table->gpu_metrics_table;
-       bool flag = !smu_v13_0_6_caps_supported(smu, SMU_CAPS(UNI_METRICS));
+       bool flag = !smu_v13_0_6_cap_supported(smu, SMU_CAP(UNI_METRICS));
        int ret = 0, xcc_id, inst, i, j, k, idx;
        struct amdgpu_device *adev = smu->adev;
        MetricsTableX_t *metrics_x;
@@ -2519,7 +2539,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
                 * table for both pf & one vf for smu version 85.99.0 or higher else report only
                 * for pf from registers
                 */
-               if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(PCIE_METRICS))) {
+               if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PCIE_METRICS))) {
                        gpu_metrics->pcie_link_width = metrics_x->PCIeLinkWidth;
                        gpu_metrics->pcie_link_speed =
                                pcie_gen_to_speed(metrics_x->PCIeLinkSpeed);
@@ -2548,8 +2568,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
                                metrics_x->PCIeNAKSentCountAcc;
                gpu_metrics->pcie_nak_rcvd_count_acc =
                                metrics_x->PCIeNAKReceivedCountAcc;
-               if (smu_v13_0_6_caps_supported(smu,
-                                              SMU_CAPS(OTHER_END_METRICS)))
+               if (smu_v13_0_6_cap_supported(smu, SMU_CAP(OTHER_END_METRICS)))
                        gpu_metrics->pcie_lc_perf_other_end_recovery =
                                metrics_x->PCIeOtherEndRecoveryAcc;

@@ -2574,7 +2593,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table

        gpu_metrics->num_partition = adev->xcp_mgr->num_xcps;

-       per_inst = smu_v13_0_6_caps_supported(smu, SMU_CAPS(PER_INST_METRICS));
+       per_inst = smu_v13_0_6_cap_supported(smu, SMU_CAP(PER_INST_METRICS));

        for_each_xcp(adev->xcp_mgr, xcp, i) {
                amdgpu_xcp_get_inst_details(xcp, AMDGPU_XCP_VCN, &inst_mask);
@@ -2605,8 +2624,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
                                gpu_metrics->xcp_stats[i].gfx_busy_acc[idx] =
                                        SMUQ10_ROUND(metrics_x->GfxBusyAcc[inst]);

-                               if (smu_v13_0_6_caps_supported(
-                                           smu, SMU_CAPS(HST_LIMIT_METRICS)))
+                               if (smu_v13_0_6_cap_supported(
+                                           smu, SMU_CAP(HST_LIMIT_METRICS)))
                                        gpu_metrics->xcp_stats[i].gfx_below_host_limit_acc[idx] =
                                                SMUQ10_ROUND(metrics_x->GfxclkBelowHostLimitAcc
                                                                [inst]);
@@ -2714,7 +2733,7 @@ static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
                return -EINVAL;

        /*Check smu version, GetCtfLimit message only supported for smu version 85.69 or higher */
-       if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(CTF_LIMIT)))
+       if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(CTF_LIMIT)))
                return 0;

        /* Get SOC Max operating temperature */
@@ -2819,7 +2838,7 @@ static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
        int ret;

        /* NOTE: the message is only valid on dGPU with pmfw 85.90.0 and above */
-       if (!smu_v13_0_6_caps_supported(smu, SMU_CAPS(RMA_MSG)))
+       if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(RMA_MSG)))
                return 0;

        ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
@@ -2840,16 +2859,16 @@ static int smu_v13_0_6_reset_sdma(struct smu_context *smu, uint32_t inst_mask)
        switch (amdgpu_ip_version(smu->adev, MP1_HWIP, 0)) {
        case IP_VERSION(13, 0, 6):
                if ((smu_program == 7 || smu_program == 0) &&
-                   smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
+                   smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET)))
                        ret = smu_cmn_send_smc_msg_with_param(smu,
                                SMU_MSG_ResetSDMA, inst_mask, NULL);
                else if ((smu_program == 4) &&
-                        smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
+                        smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET)))
                        ret = smu_cmn_send_smc_msg_with_param(smu,
                                      SMU_MSG_ResetSDMA2, inst_mask, NULL);
                break;
        case IP_VERSION(13, 0, 14):
-               if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(SDMA_RESET)))
+               if (smu_v13_0_6_cap_supported(smu, SMU_CAP(SDMA_RESET)))
                        ret = smu_cmn_send_smc_msg_with_param(smu,
                                      SMU_MSG_ResetSDMA2, inst_mask, NULL);
                break;
@@ -3175,7 +3194,7 @@ static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amd
        if (instlo != 0x03b30400)
                return false;

-       if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(ACA_SYND))) {
+       if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND))) {
                errcode = MCA_REG__SYND__ERRORINFORMATION(entry->regs[MCA_REG_IDX_SYND]);
                errcode &= 0xff;
        } else {
@@ -3464,7 +3483,7 @@ static int aca_smu_parse_error_code(struct amdgpu_device *adev, struct aca_bank
        struct smu_context *smu = adev->powerplay.pp_handle;
        int error_code;

-       if (smu_v13_0_6_caps_supported(smu, SMU_CAPS(ACA_SYND)))
+       if (smu_v13_0_6_cap_supported(smu, SMU_CAP(ACA_SYND)))
                error_code = ACA_REG__SYND__ERRORINFORMATION(bank->regs[ACA_REG_IDX_SYND]);
        else
                error_code = ACA_REG__STATUS__ERRORCODE(bank->regs[ACA_REG_IDX_STATUS]);
--
2.25.1





[Index of Archives]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]

  Powered by Linux