Use the helper for all non-VCN code which queries the number of
VCN instances.

Signed-off-by: Alex Deucher <alexander.deucher@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c  |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c      |  9 +++++---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c      |  5 +++--
 drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c   |  8 ++++---
 drivers/gpu/drm/amd/amdgpu/nv.c              |  4 +++-
 drivers/gpu/drm/amd/amdgpu/soc21.c           |  4 +++-
 drivers/gpu/drm/amd/amdgpu/soc24.c           |  4 +++-
 drivers/gpu/drm/amd/pm/amdgpu_dpm.c          |  2 +-
 drivers/gpu/drm/amd/pm/amdgpu_pm.c           |  6 ++++--
 drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c    | 21 ++++++++++++-------
 .../amd/pm/swsmu/smu11/sienna_cichlid_ppt.c  |  5 +++--
 .../gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c   |  5 +++--
 .../gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c   |  5 +++--
 13 files changed, 52 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 49ca8c814455d..7c3719ef07070 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -2028,6 +2028,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
 
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 	struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
 	struct dentry *ent;
 	int r, i;
@@ -2082,7 +2083,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 		amdgpu_debugfs_ring_init(adev, ring);
 	}
 
-	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+	for (i = 0; i < num_vcn_inst; i++) {
 		if (!amdgpu_vcnfw_log)
 			break;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5204d1217cec4..e1e0f80b23606 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -400,6 +400,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 	enum amd_ip_block_type type;
 	unsigned int num_rings = 0;
 	unsigned int i, j;
+	int num_inst;
 
 	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
 		return -EINVAL;
@@ -464,7 +465,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_HW_IP_VCN_DEC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		num_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
+		for (i = 0; i < num_inst; i++) {
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 
@@ -476,7 +478,8 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_HW_IP_VCN_ENC:
 		type = AMD_IP_BLOCK_TYPE_VCN;
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		num_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
+		for (i = 0; i < num_inst; i++) {
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 
@@ -686,7 +689,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 			count = adev->jpeg.num_jpeg_inst * adev->jpeg.num_jpeg_rings;
 			break;
 		case AMD_IP_BLOCK_TYPE_VCN:
-			count = adev->vcn.num_vcn_inst;
+			count = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 			break;
 		case AMD_IP_BLOCK_TYPE_UVD:
 			count = adev->uvd.num_uvd_inst;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index db081618e85c3..66ec1a4da7343 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -358,7 +358,7 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
 					   struct ras_debug_if *data)
 {
 	int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;
-	uint32_t mask, inst_mask = data->inject.instance_mask;
+	uint32_t mask, inst, inst_mask = data->inject.instance_mask;
 
 	/* no need to set instance mask if there is only one instance */
 	if (num_xcc <= 1 && inst_mask) {
@@ -379,7 +379,8 @@ static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev,
 		break;
 	case AMDGPU_RAS_BLOCK__VCN:
 	case AMDGPU_RAS_BLOCK__JPEG:
-		mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0);
+		inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
+		mask = GENMASK(inst - 1, 0);
 		break;
 	default:
 		mask = inst_mask;
diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
index e157d6d857b6e..ab7a2ba59a1bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
+++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
@@ -64,7 +64,9 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev)
 
 static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev)
 {
-	return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst);
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
+
+	return adev->xcp_mgr->num_xcps > num_vcn_inst;
 }
 
 static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
@@ -398,7 +400,7 @@ static int __aqua_vanjaram_get_xcp_ip_info(struct amdgpu_xcp_mgr *xcp_mgr, int x
 	int num_xcc_xcp, num_sdma_xcp, num_vcn_xcp;
 
 	num_sdma = adev->sdma.num_instances;
-	num_vcn = adev->vcn.num_vcn_inst;
+	num_vcn = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 	num_shared_vcn = 1;
 
 	num_xcc_xcp = adev->gfx.num_xcc_per_xcp;
@@ -462,7 +464,7 @@ static int aqua_vanjaram_get_xcp_res_info(struct amdgpu_xcp_mgr *xcp_mgr,
 
 	max_res[AMDGPU_XCP_RES_XCC] = NUM_XCC(adev->gfx.xcc_mask);
 	max_res[AMDGPU_XCP_RES_DMA] = adev->sdma.num_instances;
-	max_res[AMDGPU_XCP_RES_DEC] = adev->vcn.num_vcn_inst;
+	max_res[AMDGPU_XCP_RES_DEC] = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 	max_res[AMDGPU_XCP_RES_JPEG] = adev->jpeg.num_jpeg_inst;
 
 	switch (mode) {
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 47db483c35169..a14865c02d73b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -210,7 +210,9 @@ static const struct amdgpu_video_codecs yc_video_codecs_decode = {
 static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
				 const struct amdgpu_video_codecs **codecs)
 {
-	if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
+
+	if (!num_vcn_inst)
 		return -EINVAL;
 
 	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 62ad67d0b598f..7ee3f1d2fcd24 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -150,7 +150,9 @@ static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = {
 static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode,
				    const struct amdgpu_video_codecs **codecs)
 {
-	if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
+
+	if (!num_vcn_inst)
 		return -EINVAL;
 
 	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index 6b8e078ee7c75..8958f1e544e04 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -74,7 +74,9 @@ static const struct amdgpu_video_codecs vcn_5_0_0_video_codecs_decode_vcn0 = {
 static int soc24_query_video_codecs(struct amdgpu_device *adev, bool encode,
				    const struct amdgpu_video_codecs **codecs)
 {
-	if (adev->vcn.num_vcn_inst == hweight8(adev->vcn.harvest_config))
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
+
+	if (!num_vcn_inst)
 		return -EINVAL;
 
 	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6a9e26905edfc..71797b06f94f8 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -81,7 +81,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
 	bool is_vcn = (block_type == AMD_IP_BLOCK_TYPE_UVD || block_type == AMD_IP_BLOCK_TYPE_VCN);
 
 	if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
-	    (!is_vcn || adev->vcn.num_vcn_inst == 1)) {
+	    (!is_vcn || amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN) == 1)) {
 		dev_dbg(adev->dev, "IP block%d already in the target %s state!",
			block_type, gate ? "gate" : "ungate");
 		return 0;
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e8ae7681bf0a3..adedad84cbb6a 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -2093,7 +2093,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
 		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
-		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+		       gc_ver == IP_VERSION(11, 0, 3)) &&
+		      amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN) >= 2))
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
 		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
@@ -2115,7 +2116,8 @@ static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amd
 		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
		       gc_ver == IP_VERSION(10, 3, 0) ||
		       gc_ver == IP_VERSION(11, 0, 2) ||
-		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+		       gc_ver == IP_VERSION(11, 0, 3)) &&
+		      amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN) >= 2))
 			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
 		if (gc_ver == IP_VERSION(9, 4, 2) ||
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e5dd0bb31c386..566edfbdad17f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -788,19 +788,20 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
 	struct smu_power_gate *power_gate = &smu_power->power_gate;
 	int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i;
 	int ret = 0;
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 
 	if (!smu->ppt_funcs->set_default_dpm_table)
 		return 0;
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+		for (i = 0; i < num_vcn_inst; i++)
 			vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]);
 	}
 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
 		jpeg_gate = atomic_read(&power_gate->jpeg_gated);
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		for (i = 0; i < num_vcn_inst; i++) {
 			ret = smu_dpm_set_vcn_enable(smu, true, i);
 			if (ret)
 				return ret;
@@ -822,7 +823,7 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
 		smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
 err_out:
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+		for (i = 0; i < num_vcn_inst; i++)
 			smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i);
 	}
 
@@ -1279,6 +1280,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int i, ret;
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 
 	smu->pool_size = adev->pm.smu_prv_buffer_size;
 	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
@@ -1290,7 +1292,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block)
 	atomic64_set(&smu->throttle_int_counter, 0);
 	smu->watermarks_bitmap = 0;
 
-	for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+	for (i = 0; i < num_vcn_inst; i++)
 		atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1);
 	atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
 	atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
@@ -1824,6 +1826,7 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
 	int i, ret;
 	struct amdgpu_device *adev = ip_block->adev;
 	struct smu_context *smu = adev->powerplay.pp_handle;
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 
 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
 		smu->pm_enabled = false;
@@ -1847,7 +1850,7 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block)
 		ret = smu_set_gfx_imu_enable(smu);
 		if (ret)
 			return ret;
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+		for (i = 0; i < num_vcn_inst; i++)
 			smu_dpm_set_vcn_enable(smu, true, i);
 		smu_dpm_set_jpeg_enable(smu, true);
 		smu_dpm_set_vpe_enable(smu, true);
@@ -2047,11 +2050,12 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	struct smu_context *smu = adev->powerplay.pp_handle;
 	int i, ret;
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 
 	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
 		return 0;
 
-	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+	for (i = 0; i < num_vcn_inst; i++) {
 		smu_dpm_set_vcn_enable(smu, false, i);
 		adev->vcn.inst[i].cur_state = AMD_PG_STATE_GATE;
 	}
@@ -2995,7 +2999,7 @@ static int smu_read_sensor(void *handle,
 	struct amdgpu_device *adev = smu->adev;
 	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
-	int i, ret = 0;
+	int i, ret = 0, num_vcn_inst;
 	uint32_t *size, size_val;
 
 	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -3041,8 +3045,9 @@ static int smu_read_sensor(void *handle,
 		*size = 4;
 		break;
 	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+		num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 		*(uint32_t *)data = 0;
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		for (i = 0; i < num_vcn_inst; i++) {
 			if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
 				*(uint32_t *)data = 1;
 				break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 19a25fdc2f5b4..5cfb68d673089 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -958,6 +958,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 	struct amdgpu_device *adev = smu->adev;
 	int i, ret = 0;
 	DpmDescriptor_t *table_member;
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 
 	/* socclk dpm table setup */
 	dpm_table = &dpm_context->dpm_tables.soc_table;
@@ -1033,7 +1034,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 	}
 
 	/* vclk0/1 dpm table setup */
-	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+	for (i = 0; i < num_vcn_inst; i++) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
 
@@ -1056,7 +1057,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
 	}
 
 	/* dclk0/1 dpm table setup */
-	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+	for (i = 0; i < num_vcn_inst; i++) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
 		dpm_table = &dpm_context->dpm_tables.dclk_table;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 7bb45ff6d5c85..e61b8c5fb9d67 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -1717,6 +1717,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
 	uint32_t fclk_min = 0, fclk_max = 0;
 	int ret = 0, i;
 	bool auto_level = false;
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1833,7 +1834,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
 	}
 
 	if (vclk_min && vclk_max) {
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		for (i = 0; i < num_vcn_inst; i++) {
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 			ret = smu_v13_0_set_soft_freq_limited_range(smu,
@@ -1849,7 +1850,7 @@ int smu_v13_0_set_performance_level(struct smu_context *smu,
 	}
 
 	if (dclk_min && dclk_max) {
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		for (i = 0; i < num_vcn_inst; i++) {
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 			ret = smu_v13_0_set_soft_freq_limited_range(smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 9b2f4fe1578b8..e660f4a663781 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -1211,6 +1211,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
 	uint32_t fclk_min = 0, fclk_max = 0;
 	int ret = 0, i;
 	bool auto_level = false;
+	int num_vcn_inst = amdgpu_device_ip_get_num_inst(adev, AMD_IP_BLOCK_TYPE_VCN);
 
 	switch (level) {
 	case AMD_DPM_FORCED_LEVEL_HIGH:
@@ -1314,7 +1315,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
 	}
 
 	if (vclk_min && vclk_max) {
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		for (i = 0; i < num_vcn_inst; i++) {
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 			ret = smu_v14_0_set_soft_freq_limited_range(smu,
@@ -1330,7 +1331,7 @@ int smu_v14_0_set_performance_level(struct smu_context *smu,
 	}
 
 	if (dclk_min && dclk_max) {
-		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		for (i = 0; i < num_vcn_inst; i++) {
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 			ret = smu_v14_0_set_soft_freq_limited_range(smu,
-- 
2.47.1
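
Note: amdgpu_device_ip_get_num_inst() itself is introduced elsewhere in this
series and is not defined in this patch. Purely as an illustrative sketch of
the shape such a per-IP instance-count helper could take (not the actual
implementation), and assuming that harvested VCN instances are excluded from
the count — inferred from the nv/soc21/soc24 hunks above, which replace the
"all instances harvested" check with a simple test for a zero count:

	/* Illustrative sketch only -- not the helper added by this series. */
	int amdgpu_device_ip_get_num_inst(struct amdgpu_device *adev,
					  enum amd_ip_block_type type)
	{
		switch (type) {
		case AMD_IP_BLOCK_TYPE_VCN:
			/* Assumption: report only usable (non-harvested)
			 * instances, so callers can treat a zero count as
			 * "no VCN present".
			 */
			return adev->vcn.num_vcn_inst -
			       hweight8(adev->vcn.harvest_config);
		default:
			return 0;
		}
	}

Whatever the exact semantics, callers now ask the helper instead of reading
adev->vcn.num_vcn_inst directly, which keeps the instance-count policy in one
place rather than spread across the non-VCN code converted above.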