On Tue, Oct 29, 2024 at 1:53 PM <boyuan.zhang@xxxxxxx> wrote:
>
> From: Boyuan Zhang <boyuan.zhang@xxxxxxx>
>
> First, add an instance parameter to the smu_dpm_set_vcn_enable()
> function, and call dpm_set_vcn_enable() with the given instance.
>
> Second, modify vcn_gated to be an array, to track the gating status
> for each vcn instance separately.
>
> With these two changes, smu_dpm_set_vcn_enable() will check and set
> the gating status for the given vcn instance only.
>
> v2: remove duplicated functions.
>
> remove the for-loop in dpm_set_vcn_enable(), and temporarily move it
> to smu_dpm_set_power_gate(), in order to keep the exact same logic as
> before, until further separation in the next patch.
>
> v3: add the instance number to the error message.
>
> v4: declare i at the top of the function.
>
> Signed-off-by: Boyuan Zhang <boyuan.zhang@xxxxxxx>
> Acked-by: Christian König <christian.koenig@xxxxxxx>

Reviewed-by: Alex Deucher <alexander.deucher@xxxxxxx>

> ---
>  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c     | 75 ++++++++++++-------
>  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h |  2 +-
>  2 files changed, 47 insertions(+), 30 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> index ccacba56159e..dfbec2e2ec20 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
> @@ -234,11 +234,11 @@ static bool is_vcn_enabled(struct amdgpu_device *adev)
>  }
>
>  static int smu_dpm_set_vcn_enable(struct smu_context *smu,
> -				  bool enable)
> +				  bool enable,
> +				  int inst)
>  {
>  	struct smu_power_context *smu_power = &smu->smu_power;
>  	struct smu_power_gate *power_gate = &smu_power->power_gate;
> -	struct amdgpu_device *adev = smu->adev;
>  	int ret = 0;
>
>  	/*
> @@ -250,14 +250,12 @@ static int smu_dpm_set_vcn_enable(struct smu_context *smu,
>  	if (!smu->ppt_funcs->dpm_set_vcn_enable)
>  		return 0;
>
> -	if (atomic_read(&power_gate->vcn_gated) ^ enable)
> +	if (atomic_read(&power_gate->vcn_gated[inst]) ^ enable)
>  		return 0;
>
> -	for (int i = 0; i < adev->vcn.num_vcn_inst; i++) {
> -		ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, i);
> -		if (ret)
> -			return ret;
> -	}
> +	ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable, inst);
> +	if (!ret)
> +		atomic_set(&power_gate->vcn_gated[inst], !enable);
>
>  	return ret;
>  }
> @@ -359,7 +357,8 @@ static int smu_dpm_set_power_gate(void *handle,
>  				  bool gate)
>  {
>  	struct smu_context *smu = handle;
> -	int ret = 0;
> +	struct amdgpu_device *adev = smu->adev;
> +	int i, ret = 0;
>
>  	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
>  		dev_WARN(smu->adev->dev,
> @@ -375,10 +374,12 @@ static int smu_dpm_set_power_gate(void *handle,
>  	 */
>  	case AMD_IP_BLOCK_TYPE_UVD:
>  	case AMD_IP_BLOCK_TYPE_VCN:
> -		ret = smu_dpm_set_vcn_enable(smu, !gate);
> -		if (ret)
> -			dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
> -				gate ? "gate" : "ungate");
> +		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> +			ret = smu_dpm_set_vcn_enable(smu, !gate, i);
> +			if (ret)
> +				dev_err(smu->adev->dev, "Failed to power %s VCN instance %d!\n",
> +					gate ? "gate" : "ungate", i);
"gate" : "ungate", i); > + } > break; > case AMD_IP_BLOCK_TYPE_GFX: > ret = smu_gfx_off_control(smu, gate); > @@ -780,21 +781,25 @@ static int smu_set_default_dpm_table(struct smu_context *smu) > struct amdgpu_device *adev = smu->adev; > struct smu_power_context *smu_power = &smu->smu_power; > struct smu_power_gate *power_gate = &smu_power->power_gate; > - int vcn_gate, jpeg_gate; > + int vcn_gate[AMDGPU_MAX_VCN_INSTANCES], jpeg_gate, i; > int ret = 0; > > if (!smu->ppt_funcs->set_default_dpm_table) > return 0; > > - if (adev->pg_flags & AMD_PG_SUPPORT_VCN) > - vcn_gate = atomic_read(&power_gate->vcn_gated); > + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { > + for (i = 0; i < adev->vcn.num_vcn_inst; i++) > + vcn_gate[i] = atomic_read(&power_gate->vcn_gated[i]); > + } > if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) > jpeg_gate = atomic_read(&power_gate->jpeg_gated); > > if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { > - ret = smu_dpm_set_vcn_enable(smu, true); > - if (ret) > - return ret; > + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { > + ret = smu_dpm_set_vcn_enable(smu, true, i); > + if (ret) > + return ret; > + } > } > > if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) { > @@ -811,8 +816,10 @@ static int smu_set_default_dpm_table(struct smu_context *smu) > if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) > smu_dpm_set_jpeg_enable(smu, !jpeg_gate); > err_out: > - if (adev->pg_flags & AMD_PG_SUPPORT_VCN) > - smu_dpm_set_vcn_enable(smu, !vcn_gate); > + if (adev->pg_flags & AMD_PG_SUPPORT_VCN) { > + for (i = 0; i < adev->vcn.num_vcn_inst; i++) > + smu_dpm_set_vcn_enable(smu, !vcn_gate[i], i); > + } > > return ret; > } > @@ -1251,7 +1258,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) > { > struct amdgpu_device *adev = ip_block->adev; > struct smu_context *smu = adev->powerplay.pp_handle; > - int ret; > + int i, ret; > > smu->pool_size = adev->pm.smu_prv_buffer_size; > smu->smu_feature.feature_num = SMU_FEATURE_MAX; > @@ -1265,7 +1272,8 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) > smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; > smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; > > - atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); > + for (i = 0; i < adev->vcn.num_vcn_inst; i++) > + atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1); > atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); > atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); > atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); > @@ -1806,7 +1814,7 @@ static int smu_start_smc_engine(struct smu_context *smu) > > static int smu_hw_init(struct amdgpu_ip_block *ip_block) > { > - int ret; > + int i, ret; > struct amdgpu_device *adev = ip_block->adev; > struct smu_context *smu = adev->powerplay.pp_handle; > > @@ -1832,7 +1840,8 @@ static int smu_hw_init(struct amdgpu_ip_block *ip_block) > ret = smu_set_gfx_imu_enable(smu); > if (ret) > return ret; > - smu_dpm_set_vcn_enable(smu, true); > + for (i = 0; i < adev->vcn.num_vcn_inst; i++) > + smu_dpm_set_vcn_enable(smu, true, i); > smu_dpm_set_jpeg_enable(smu, true); > smu_dpm_set_vpe_enable(smu, true); > smu_dpm_set_umsch_mm_enable(smu, true); > @@ -2030,12 +2039,13 @@ static int smu_hw_fini(struct amdgpu_ip_block *ip_block) > { > struct amdgpu_device *adev = ip_block->adev; > struct smu_context *smu = adev->powerplay.pp_handle; > - int ret; > + int i, ret; > > if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) > return 0; > > - smu_dpm_set_vcn_enable(smu, false); > + for (i = 0; i < adev->vcn.num_vcn_inst; 
> +		smu_dpm_set_vcn_enable(smu, false, i);
>  	smu_dpm_set_jpeg_enable(smu, false);
>  	smu_dpm_set_vpe_enable(smu, false);
>  	smu_dpm_set_umsch_mm_enable(smu, false);
> @@ -2949,9 +2959,10 @@ static int smu_read_sensor(void *handle,
>  			   int *size_arg)
>  {
>  	struct smu_context *smu = handle;
> +	struct amdgpu_device *adev = smu->adev;
>  	struct smu_umd_pstate_table *pstate_table =
>  				&smu->pstate_table;
> -	int ret = 0;
> +	int i, ret = 0;
>  	uint32_t *size, size_val;
>
>  	if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
> @@ -2997,7 +3008,13 @@ static int smu_read_sensor(void *handle,
>  		*size = 4;
>  		break;
>  	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
> -		*(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
> +		*(uint32_t *)data = 0;
> +		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
> +			if (!atomic_read(&smu->smu_power.power_gate.vcn_gated[i])) {
> +				*(uint32_t *)data = 1;
> +				break;
> +			}
> +		}
>  		*size = 4;
>  		break;
>  	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
> diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> index 4ebcc1e53ea2..06d817fb84aa 100644
> --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
> @@ -399,7 +399,7 @@ struct smu_dpm_context {
>  struct smu_power_gate {
>  	bool uvd_gated;
>  	bool vce_gated;
> -	atomic_t vcn_gated;
> +	atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES];
>  	atomic_t jpeg_gated;
>  	atomic_t vpe_gated;
>  	atomic_t umsch_mm_gated;
> --
> 2.34.1
>
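
For anyone skimming this in the archive, the behavioural core of the
change is the per-instance skip check,
atomic_read(&power_gate->vcn_gated[inst]) ^ enable: a power-up or
power-down request becomes a no-op when that particular instance is
already in the requested state, and the per-instance flag is only
flipped once the ppt callback succeeds. Below is a minimal standalone
userspace sketch of that bookkeeping; it is an illustration only, not
kernel code and not part of the patch. MAX_VCN_INSTANCES, vcn_gated and
set_vcn_enable here are stand-ins for AMDGPU_MAX_VCN_INSTANCES,
smu_power_gate.vcn_gated[] and smu_dpm_set_vcn_enable().

/*
 * Minimal userspace sketch (illustration only, not part of the patch).
 * Models the per-instance gating bookkeeping: vcn_gated[i] is 1 while
 * instance i is gated (powered down), and an enable/disable request is
 * skipped when the instance is already in the requested state.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_VCN_INSTANCES 4	/* stand-in for AMDGPU_MAX_VCN_INSTANCES */

static atomic_int vcn_gated[MAX_VCN_INSTANCES];

static int set_vcn_enable(int inst, bool enable)
{
	/*
	 * Same skip condition as the patch: the gated flag is 0 or 1,
	 * so the XOR is non-zero exactly when the instance is already
	 * in the requested state and there is nothing to do.
	 */
	if (atomic_load(&vcn_gated[inst]) ^ enable)
		return 0;

	/*
	 * In the driver this is where the ppt callback powers the VCN
	 * instance up or down; the flag is only updated on success.
	 */
	printf("instance %d: powering %s\n", inst, enable ? "up" : "down");
	atomic_store(&vcn_gated[inst], !enable);
	return 0;
}

int main(void)
{
	int i;

	/* All instances start out gated, as smu_sw_init() does. */
	for (i = 0; i < MAX_VCN_INSTANCES; i++)
		atomic_store(&vcn_gated[i], 1);

	set_vcn_enable(0, true);	/* powers up instance 0 */
	set_vcn_enable(0, true);	/* no-op, instance 0 already enabled */
	set_vcn_enable(1, false);	/* no-op, instance 1 already gated */
	set_vcn_enable(0, false);	/* powers instance 0 back down */
	return 0;
}

With the state held per instance, callers such as
smu_dpm_set_power_gate() can simply loop over adev->vcn.num_vcn_inst
and invoke the helper once per instance, which is exactly what the
patch does.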