add soft reset sequence for vcn v3_0

Implement the check_soft_reset and soft_reset IP callbacks for VCN v3.0.
check_soft_reset reports that a reset is needed when the given job was
scheduled on a VCN ring (scheduler name prefixed with "vcn") or, when no
job is passed, when any VCN instance still has UVD_STATUS busy.
soft_reset stalls the LMI UMC channel, blocks VCPU register access,
holds the VCPU in reset with its clock disabled, asserts the LMI and
LMI UMC soft resets, clears UVD_STATUS, re-applies clock and power
gating, and finally ungates each instance again so it is ready to be
reinitialized.

Signed-off-by: Victor Zhao <Victor.Zhao@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 87 ++++++++++++++++++++++++++-
 1 file changed, 85 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index da11ceba0698..8c9b1b84384b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -2247,6 +2247,89 @@ static void vcn_v3_0_set_irq_funcs(struct amdgpu_device *adev)
 	}
 }
 
+static bool vcn_v3_0_check_soft_reset(void *handle, struct amdgpu_job *job)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
+
+	if (job) {
+		if (!strncmp(job->base.sched->name, "vcn", 3))
+			return true;
+		else
+			return false;
+	} else {
+		for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+			uint32_t tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS);
+
+			if (tmp == 0xDEADBEEF)
+				continue;
+			else if (tmp & UVD_STATUS__BUSY)
+				return true;
+		}
+		return false;
+	}
+}
+
+static int vcn_v3_0_soft_reset(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	uint32_t tmp;
+	int i = 0;
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		/* disable LMI UMC channel */
+		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
+		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
+		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);
+
+		/* block VCPU register access */
+		tmp = RREG32_SOC15(VCN, i, mmUVD_RB_ARB_CTRL);
+		tmp |= UVD_RB_ARB_CTRL__VCPU_DIS_MASK;
+		WREG32_SOC15(VCN, i, mmUVD_RB_ARB_CTRL, tmp);
+
+		/* reset VCPU */
+		tmp = RREG32_SOC15(VCN, i, mmUVD_VCPU_CNTL);
+		tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_CNTL, tmp);
+
+		/* disable VCPU clock */
+		tmp = RREG32_SOC15(VCN, i, mmUVD_VCPU_CNTL);
+		tmp &= ~(UVD_VCPU_CNTL__CLK_EN_MASK);
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_CNTL, tmp);
+
+		/* apply soft reset */
+		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+		tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK;
+		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+		tmp = RREG32_SOC15(VCN, i, mmUVD_SOFT_RESET);
+		tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK;
+		WREG32_SOC15(VCN, i, mmUVD_SOFT_RESET, tmp);
+
+		tmp = RREG32_SOC15(VCN, i, mmUVD_VCPU_CNTL);
+		tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
+		WREG32_SOC15(VCN, i, mmUVD_VCPU_CNTL, tmp);
+
+		/* clear status */
+		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
+
+		/* apply HW clock gating */
+		vcn_v3_0_enable_clock_gating(adev, i);
+
+		/* enable VCN power gating */
+		vcn_v3_0_enable_static_power_gating(adev, i);
+	}
+
+	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		/* disable VCN power gating */
+		vcn_v3_0_disable_static_power_gating(adev, i);
+
+		/* SW clock gating */
+		vcn_v3_0_disable_clock_gating(adev, i);
+	}
+
+	return 0;
+}
+
 static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
 	.name = "vcn_v3_0",
 	.early_init = vcn_v3_0_early_init,
@@ -2259,9 +2342,9 @@ static const struct amd_ip_funcs vcn_v3_0_ip_funcs = {
 	.resume = vcn_v3_0_resume,
 	.is_idle = vcn_v3_0_is_idle,
 	.wait_for_idle = vcn_v3_0_wait_for_idle,
-	.check_soft_reset = NULL,
+	.check_soft_reset = vcn_v3_0_check_soft_reset,
 	.pre_soft_reset = NULL,
-	.soft_reset = NULL,
+	.soft_reset = vcn_v3_0_soft_reset,
 	.post_soft_reset = NULL,
 	.set_clockgating_state = vcn_v3_0_set_clockgating_state,
 	.set_powergating_state = vcn_v3_0_set_powergating_state,
-- 
2.25.1
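
For anyone who wants to exercise the trigger conditions outside the driver,
here is a minimal standalone sketch of the decision logic in
vcn_v3_0_check_soft_reset(): a passed-in job requests a reset only if it was
scheduled on a "vcn"-named ring; otherwise every instance's status word is
scanned for the busy bit, skipping instances whose status reads back as
0xDEADBEEF, as in the patch. Everything below (fake_job,
fake_check_soft_reset, the FAKE_* constants) is a hypothetical stand-in for
illustration, not an amdgpu symbol.

/*
 * Self-contained sketch of the check_soft_reset decision logic.
 * NOT kernel code; all names are stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAKE_UVD_STATUS__BUSY	0x2		/* stand-in for UVD_STATUS__BUSY */
#define FAKE_STATUS_SKIP	0xDEADBEEF	/* status value skipped, as in the patch */

struct fake_job {				/* stand-in for the scheduled job */
	const char *sched_name;
};

/* Mirrors the two branches of vcn_v3_0_check_soft_reset(). */
static bool fake_check_soft_reset(const struct fake_job *job,
				  const uint32_t *status, int num_inst)
{
	int i;

	if (job)
		/* only a job on a "vcn"-named scheduler requests a reset */
		return !strncmp(job->sched_name, "vcn", 3);

	for (i = 0; i < num_inst; ++i) {
		if (status[i] == FAKE_STATUS_SKIP)
			continue;		/* skip this instance */
		if (status[i] & FAKE_UVD_STATUS__BUSY)
			return true;		/* engine still busy -> reset needed */
	}
	return false;
}

int main(void)
{
	const struct fake_job vcn_job = { .sched_name = "vcn_dec_0" };
	const struct fake_job gfx_job = { .sched_name = "gfx" };
	const uint32_t status_busy[2] = { 0x0, FAKE_UVD_STATUS__BUSY };
	const uint32_t status_idle[2] = { FAKE_STATUS_SKIP, 0x0 };

	printf("vcn job   -> %d\n", fake_check_soft_reset(&vcn_job, NULL, 0));	/* 1 */
	printf("gfx job   -> %d\n", fake_check_soft_reset(&gfx_job, NULL, 0));	/* 0 */
	printf("busy inst -> %d\n", fake_check_soft_reset(NULL, status_busy, 2));	/* 1 */
	printf("all idle  -> %d\n", fake_check_soft_reset(NULL, status_idle, 2));	/* 0 */
	return 0;
}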