From: "Jesse.zhang@xxxxxxx" <Jesse.zhang@xxxxxxx>

This patch refactors the SDMA v5.0 reset logic by splitting the
`sdma_v5_0_reset_queue` function into two separate functions:
`sdma_v5_0_stop_queue` and `sdma_v5_0_restore_queue`. This change
aligns with the new SDMA reset mechanism, where the reset process is
divided into stopping the queue, performing the reset, and restoring
the queue.

1. **Split `sdma_v5_0_reset_queue`**:
   - Extracted the queue stopping logic into `sdma_v5_0_stop_queue`.
   - Extracted the queue restoration logic into `sdma_v5_0_restore_queue`.
   - The soft reset step is now handled by the caller
     (`amdgpu_sdma_reset_engine`).

2. **Update Ring Functions**:
   - Added `stop_queue` and `start_queue` to the `sdma_v5_0_ring_funcs`
     structure to support the new reset mechanism.

Signed-off-by: Jesse Zhang <jesse.zhang@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 37 +++++++++++++++-----------
 1 file changed, 22 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 387a626936b3..9458d95a4795 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -1574,17 +1574,23 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block)
 static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 {
 	struct amdgpu_device *adev = ring->adev;
-	int j, r;
-	u32 f32_cntl, freeze, cntl, preempt, soft_reset, stat1_reg;
-	u32 inst_id;
+	u32 inst_id = ring->me;
+
+	return amdgpu_sdma_reset_engine(adev, inst_id, true);
+}
+
+static int sdma_v5_0_stop_queue(struct amdgpu_device *adev, uint32_t inst_id)
+{
+	int j, r = 0;
+	u32 f32_cntl, freeze, cntl, preempt, stat1_reg;
 
 	if (amdgpu_sriov_vf(adev))
 		return -EINVAL;
 
-	inst_id = ring->me;
+	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
 	/* stop queue */
-	sdma_v5_0_gfx_stop(adev, 1 << ring->me);
+	sdma_v5_0_gfx_stop(adev, 1 << inst_id);
 
 	/* engine stop SDMA1_F32_CNTL.HALT to 1 and SDMAx_FREEZE freeze bit
 	 * to 1 */
 	freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
@@ -1620,17 +1626,17 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 	preempt = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_GFX_PREEMPT));
 	preempt = REG_SET_FIELD(preempt, SDMA0_GFX_PREEMPT, IB_PREEMPT, 0);
 	WREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_GFX_PREEMPT), preempt);
+err0:
+	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+	return r;
+}
 
-	soft_reset = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
-	soft_reset |= 1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << inst_id;
-
-	WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
-
-	udelay(50);
-
-	soft_reset &= ~(1 << GRBM_SOFT_RESET__SOFT_RESET_SDMA0__SHIFT << inst_id);
-	WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, soft_reset);
+static int sdma_v5_0_restore_queue(struct amdgpu_device *adev, uint32_t inst_id)
+{
+	int r;
+	u32 freeze;
+	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
 
 	/* unfreeze*/
 	freeze = RREG32(sdma_v5_0_get_reg_offset(adev, inst_id, mmSDMA0_FREEZE));
 	freeze = REG_SET_FIELD(freeze, SDMA0_FREEZE, FREEZE, 0);
@@ -1638,7 +1644,6 @@ static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
 
 	r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true);
 
-err0:
 	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
 	return r;
 }
@@ -1985,6 +1990,8 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
 	.init_cond_exec = sdma_v5_0_ring_init_cond_exec,
 	.preempt_ib = sdma_v5_0_ring_preempt_ib,
 	.reset = sdma_v5_0_reset_queue,
+	.stop_queue = sdma_v5_0_stop_queue,
+	.start_queue = sdma_v5_0_restore_queue,
 };
 
 static void sdma_v5_0_set_ring_funcs(struct amdgpu_device *adev)
-- 
2.25.1