Pass the vcn instance structure to these functions rather than adev
and the instance number.

TODO: clean up the function internals to use the vinst state directly
rather than accessing it indirectly via adev->vcn.inst[].

Signed-off-by: Alex Deucher <alexander.deucher@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 79 +++++++++++++++++----------
 1 file changed, 50 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 4f9f3de023ffd..69cf42a4a3f18 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -468,13 +468,14 @@ static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
 /**
  * vcn_v2_5_mc_resume - memory controller programming
  *
- * @adev: amdgpu_device pointer
- * @i: instance to resume
+ * @vinst: VCN instance
  *
  * Let the VCN memory controller know it's offsets
  */
-static void vcn_v2_5_mc_resume(struct amdgpu_device *adev, int i)
+static void vcn_v2_5_mc_resume(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	uint32_t size;
 	uint32_t offset;
 
@@ -527,8 +528,11 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev, int i)
 			  AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
 }
 
-static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+					bool indirect)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
 	uint32_t offset;
 
@@ -626,13 +630,14 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 /**
  * vcn_v2_5_disable_clock_gating - disable VCN clock gating
  *
- * @adev: amdgpu_device pointer
- * @i: instance to disable clockgating on
+ * @vinst: VCN instance
  *
  * Disable clock gating for VCN block
  */
-static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev, int i)
+static void vcn_v2_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	uint32_t data;
 
 	if (adev->vcn.harvest_config & (1 << i))
@@ -738,9 +743,11 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev, int i)
 	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
 }
 
-static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
-		uint8_t sram_sel, int inst_idx, uint8_t indirect)
+static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+					   uint8_t sram_sel, uint8_t indirect)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	uint32_t reg_data = 0;
 
 	/* enable sw clock gating control */
@@ -789,13 +796,14 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
 /**
  * vcn_v2_5_enable_clock_gating_inst - enable VCN clock gating
  *
- * @adev: amdgpu_device pointer
- * @i: instance to enable clockgating on
+ * @vinst: VCN instance
  *
  * Enable clock gating for VCN block
  */
-static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev, int i)
+static void vcn_v2_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	uint32_t data = 0;
 
 	if (adev->vcn.harvest_config & (1 << i))
@@ -846,9 +854,11 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev, int i)
 	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
 }
 
-static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
-				bool indirect)
+static void vcn_v2_6_enable_ras(struct amdgpu_vcn_inst *vinst,
+				bool indirect)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	uint32_t tmp;
 
 	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
@@ -873,8 +883,10 @@ static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
 			tmp, 0, indirect);
 }
 
-static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
 	struct amdgpu_ring *ring;
 	uint32_t rb_bufsz, tmp;
@@ -892,7 +904,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
 
 	/* enable clock gating */
-	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+	vcn_v2_5_clock_gating_dpg_mode(vinst, 0, indirect);
 
 	/* enable VCPU clock */
 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -941,7 +953,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
 
-	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+	vcn_v2_5_mc_resume_dpg_mode(vinst, indirect);
 
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
 		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -952,7 +964,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
 		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
 
-	vcn_v2_6_enable_ras(adev, inst_idx, indirect);
+	vcn_v2_6_enable_ras(vinst, indirect);
 
 	/* unblock VCPU register access */
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -1017,8 +1029,10 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	return 0;
 }
 
-static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
+static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	volatile struct amdgpu_fw_shared *fw_shared =
 		adev->vcn.inst[i].fw_shared.cpu_addr;
 	struct amdgpu_ring *ring;
@@ -1032,7 +1046,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
 	amdgpu_dpm_enable_vcn(adev, true, i);
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-		return vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.inst[i].indirect_sram);
+		return vcn_v2_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
 
 	/* disable register anti-hang mechanism */
 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
@@ -1046,7 +1060,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
 		return 0;
 
 	/* SW clock gating */
-	vcn_v2_5_disable_clock_gating(adev, i);
+	vcn_v2_5_disable_clock_gating(vinst);
 
 	/* enable VCPU clock */
 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
@@ -1091,7 +1105,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
 
-	vcn_v2_5_mc_resume(adev, i);
+	vcn_v2_5_mc_resume(vinst);
 
 	/* VCN global tiling registers */
 	WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
@@ -1396,8 +1410,10 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
 	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
 }
 
-static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	uint32_t tmp;
 
 	/* Wait for power status to be 1 */
@@ -1424,15 +1440,17 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
 	return 0;
 }
 
-static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
+static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	uint32_t tmp;
 	int r;
 
 	if (adev->vcn.harvest_config & (1 << i))
 		return 0;
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-		return vcn_v2_5_stop_dpg_mode(adev, i);
+		return vcn_v2_5_stop_dpg_mode(vinst);
 
 	/* wait for vcn idle */
 	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
@@ -1475,7 +1493,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
 	/* clear status */
 	WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
 
-	vcn_v2_5_enable_clock_gating(adev, i);
+	vcn_v2_5_enable_clock_gating(vinst);
 
 	/* enable register anti-hang mechanism */
 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
@@ -1810,12 +1828,14 @@ static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
 		return 0;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
 		if (enable) {
 			if (!vcn_v2_5_is_idle(adev))
 				return -EBUSY;
-			vcn_v2_5_enable_clock_gating(adev, i);
+			vcn_v2_5_enable_clock_gating(vinst);
 		} else {
-			vcn_v2_5_disable_clock_gating(adev, i);
+			vcn_v2_5_disable_clock_gating(vinst);
 		}
 	}
 
@@ -1827,6 +1847,7 @@ static int vcn_v2_5_set_powergating_state_inst(struct amdgpu_ip_block *ip_block,
 					       int i)
 {
 	struct amdgpu_device *adev = ip_block->adev;
+	struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
 	int ret;
 
 	if (amdgpu_sriov_vf(adev))
@@ -1836,9 +1857,9 @@ static int vcn_v2_5_set_powergating_state_inst(struct amdgpu_ip_block *ip_block,
 		return 0;
 
 	if (state == AMD_PG_STATE_GATE)
-		ret = vcn_v2_5_stop(adev, i);
+		ret = vcn_v2_5_stop(vinst);
 	else
-		ret = vcn_v2_5_start(adev, i);
+		ret = vcn_v2_5_start(vinst);
 
 	if (!ret)
 		adev->vcn.inst[i].cur_state = state;
-- 
2.47.1
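For reference, a minimal, compilable sketch of the calling convention this patch moves to: callers hand over the amdgpu_vcn_inst, and each helper derives the device pointer and instance index from it. Only the vinst->adev and vinst->inst fields are taken from the diff; the reduced stand-in structures and vcn_example_op() below are hypothetical, for illustration only (the real definitions live in the amdgpu headers and carry far more state).

/* Illustration only -- not part of the patch. */
#include <stdio.h>

struct amdgpu_device {
	const char *name;
};

struct amdgpu_vcn_inst {
	struct amdgpu_device *adev;	/* back-pointer to the owning device */
	int inst;			/* index of this VCN instance */
};

/*
 * New-style helper: takes the instance and derives adev and the index
 * itself, mirroring the first two lines of every converted function above.
 */
static void vcn_example_op(struct amdgpu_vcn_inst *vinst)
{
	struct amdgpu_device *adev = vinst->adev;
	int i = vinst->inst;

	printf("programming VCN instance %d of %s\n", i, adev->name);
}

int main(void)
{
	struct amdgpu_device dev = { .name = "adev0" };
	struct amdgpu_vcn_inst vinst = { .adev = &dev, .inst = 1 };

	/* Callers now pass the instance rather than (adev, i). */
	vcn_example_op(&vinst);
	return 0;
}

Once the TODO in the commit message is addressed, such helpers would use the instance's own state instead of indexing back into adev->vcn.inst[].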