Change-Id: I963598ba6eb44bc8620d70e026c0175d1a1de120
Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 27 +++++++++++++++++++++++++--
 1 file changed, 25 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4d7d305..dcd9ad4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2113,19 +2113,42 @@ retry:
 			amdgpu_atombios_scratch_regs_restore(adev);
 		}
 		if (!r) {
+			struct amdgpu_ring *buffer_ring = adev->mman.buffer_funcs_ring;
+
 			r = amdgpu_ib_ring_tests(adev);
 			if (r) {
 				dev_err(adev->dev, "ib ring test failed (%d).\n", r);
 				r = amdgpu_suspend(adev);
+				need_full_reset = true;
 				goto retry;
 			}
-
+			/**
+			 * Recover the VM page tables, since we cannot depend on
+			 * the VRAM contents being intact after a full GPU reset.
+			 */
+			if (need_full_reset) {
+				struct amdgpu_vm *vm;
+
+				amd_sched_rq_block_entity(
+					&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], true);
+				kthread_unpark(buffer_ring->sched.thread);
+				spin_lock(&adev->vm_list_lock);
+				list_for_each_entry(vm, &adev->vm_list, list) {
+					spin_unlock(&adev->vm_list_lock);
+					amdgpu_vm_recover_page_table_from_shadow(adev, vm);
+					spin_lock(&adev->vm_list_lock);
+				}
+				spin_unlock(&adev->vm_list_lock);
+				amd_sched_rq_block_entity(
+					&buffer_ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL], false);
+			}
 			for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 				struct amdgpu_ring *ring = adev->rings[i];
 				if (!ring)
 					continue;
 				amd_sched_job_recovery(&ring->sched);
-				kthread_unpark(ring->sched.thread);
+				if (ring != buffer_ring || !need_full_reset)
+					kthread_unpark(ring->sched.thread);
 			}
 		} else {
 			dev_err(adev->dev, "asic resume failed (%d).\n", r);
--
1.9.1