Handle HQD deactivation timeouts instead of ignoring them.

Reviewed-by: Edward O'Callaghan <funfunctor at folklore1984.net>
Acked-by: Christian König <christian.koenig at amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling at amd.com>
Signed-off-by: Andres Rodriguez <andresx7 at gmail.com>
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b670302..cd1af26 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4947,75 +4947,89 @@ static int gfx_v8_0_mqd_commit(struct amdgpu_ring *ring)
 	/* enable the doorbell if requested */
 	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
 
 	/* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */
 	WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr);
 
 	/* set the vmid for the queue */
 	WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid);
 
 	WREG32(mmCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state);
 
 	/* activate the queue */
 	WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
 
 	return 0;
 }
 
 static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 {
+	int r = 0;
 	struct amdgpu_device *adev = ring->adev;
 	struct vi_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
 
 	gfx_v8_0_kiq_setting(ring);
 
 	if (adev->gfx.in_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
 
 		/* reset ring buffer */
 		ring->wptr = 0;
 		amdgpu_ring_clear_ring(ring);
 
 		mutex_lock(&adev->srbm_mutex);
 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-		gfx_v8_0_deactivate_hqd(adev, 1);
+		r = gfx_v8_0_deactivate_hqd(adev, 1);
+		if (r) {
+			dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
+			goto out_unlock;
+		}
 		gfx_v8_0_mqd_commit(ring);
 		vi_srbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 	} else {
 		mutex_lock(&adev->srbm_mutex);
 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
 		gfx_v8_0_mqd_init(ring);
-		gfx_v8_0_deactivate_hqd(adev, 1);
+		r = gfx_v8_0_deactivate_hqd(adev, 1);
+		if (r) {
+			dev_err(adev->dev, "failed to deactivate ring %s\n", ring->name);
+			goto out_unlock;
+		}
 		gfx_v8_0_mqd_commit(ring);
 		vi_srbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
 	}
 
-	return 0;
+	return r;
+
+out_unlock:
+	vi_srbm_select(adev, 0, 0, 0, 0);
+	mutex_unlock(&adev->srbm_mutex);
+	return r;
 }
 
 static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct vi_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
 	if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
 		mutex_lock(&adev->srbm_mutex);
 		vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
 		gfx_v8_0_mqd_init(ring);
 		vi_srbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
 	} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
-- 
2.9.3
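
For context, gfx_v8_0_deactivate_hqd() is the helper whose return value the patch starts checking. A minimal sketch of the kind of poll-with-timeout it performs is below; the register names, field macro, and the adev->usec_timeout/udelay polling convention are assumptions based on typical gfx_v8_0 HQD handling, not part of this patch:

static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
{
	int i, r = 0;

	/* nothing to do if the HQD is already inactive */
	if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
		/* ask the CP to dequeue, then poll for the HQD to go idle */
		WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, req);
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
				break;
			udelay(1);
		}
		/* this is the timeout the callers above no longer ignore */
		if (i == adev->usec_timeout)
			r = -ETIMEDOUT;
	}
	return r;
}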