Since the shadow is in GTT, the shadow's own PTEs aren't in the shadow,
so we need to sync the GART table before initialization is completed.

Change-Id: I29b433da6c71fc790a32ef202dd85a72ab6b5787
Signed-off-by: Chunming Zhou <David1.Zhou at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h      |  1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 50 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 3ee01fe..4cad4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -648,6 +648,7 @@ struct amdgpu_gart {
 	bool ready;
 	const struct amdgpu_gart_funcs *gart_funcs;
 	struct amd_sched_entity recover_entity;
+	u64 shadow_gpu_addr;
 };
 
 int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index baeaee2..e99c8a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -421,10 +421,49 @@ int amdgpu_gart_table_vram_shadow_pin(struct amdgpu_device *adev)
 	r = amdgpu_bo_kmap(adev->gart.robj->shadow, &adev->gart.shadow_ptr);
 	if (r)
 		amdgpu_bo_unpin(adev->gart.robj->shadow);
+	adev->gart.shadow_gpu_addr = gpu_addr;
 	amdgpu_bo_unreserve(adev->gart.robj->shadow);
 	return r;
 }
 
+/* Since the shadow is in GTT, the shadow's own PTEs aren't in the
+ * shadow, so sync the table before initialization is completed. */
+static int amdgpu_gart_table_shadow_sync(struct amdgpu_device *adev)
+{
+	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+	struct amd_sched_entity *entity = &adev->gart.recover_entity;
+	struct fence *fence;
+	u64 vram_addr = adev->gart.table_addr;
+	u64 shadow_addr = adev->gart.shadow_gpu_addr;
+	int r;
+
+	if (!adev->gart.ready) {
+		DRM_ERROR("gart is not ready, cannot sync shadow.\n");
+		return -EINVAL;
+	}
+	if (!amdgpu_vm_need_backup(adev) || !adev->gart.robj ||
+	    !adev->gart.robj->shadow)
+		return 0;
+	r = amdgpu_bo_reserve(adev->gart.robj->shadow, false);
+	if (unlikely(r != 0))
+		return r;
+	/* adev->gart.ready implies both the gart BO and its shadow are pinned */
+	r = amdgpu_copy_buffer(ring, entity, vram_addr,
+			       shadow_addr, amdgpu_bo_size(adev->gart.robj),
+			       adev->gart.robj->tbo.resv, &fence);
+	if (r) {
+		amdgpu_bo_unreserve(adev->gart.robj->shadow);
+		return r;
+	}
+	amdgpu_bo_fence(adev->gart.robj, fence, true);
+
+	amdgpu_bo_unreserve(adev->gart.robj->shadow);
+	r = fence_wait(fence, true);
+	fence_put(fence);
+
+	return r;
+}
+
 void amdgpu_gart_table_vram_shadow_unpin(struct amdgpu_device *adev)
 {
 	int r;
@@ -445,10 +484,19 @@ int amdgpu_gart_late_init(struct amdgpu_device *adev)
 {
 	struct amd_sched_rq *rq;
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
+	int r;
 
 	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_RECOVER];
-	return amd_sched_entity_init(&ring->sched, &adev->gart.recover_entity,
+	r = amd_sched_entity_init(&ring->sched, &adev->gart.recover_entity,
 				     rq, amdgpu_sched_jobs);
+	if (r)
+		return r;
+	r = amdgpu_gart_table_shadow_sync(adev);
+	if (r) {
+		DRM_ERROR("failed to sync gart table (%d).\n", r);
+		amd_sched_entity_fini(&ring->sched, &adev->gart.recover_entity);
+	}
+	return r;
 }
 
 void amdgpu_gart_late_fini(struct amdgpu_device *adev)
-- 
1.9.1
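
For context, here is a rough sketch of the recovery-direction copy that the
shadow exists to enable: restoring the GART table from the GTT shadow back
into VRAM after a GPU reset. This is an illustration only, not part of the
patch; the helper name amdgpu_gart_table_shadow_recover is hypothetical, and
the sketch reuses only the calls already visible in the diff above.

/* Hypothetical sketch: reverse of amdgpu_gart_table_shadow_sync(),
 * copying the shadow (GTT) back over the GART table (VRAM).
 */
static int amdgpu_gart_table_shadow_recover(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct amd_sched_entity *entity = &adev->gart.recover_entity;
	struct fence *fence;
	int r;

	if (!adev->gart.robj || !adev->gart.robj->shadow)
		return 0;
	r = amdgpu_bo_reserve(adev->gart.robj->shadow, false);
	if (unlikely(r != 0))
		return r;
	/* source and destination are swapped relative to the sync path */
	r = amdgpu_copy_buffer(ring, entity, adev->gart.shadow_gpu_addr,
			       adev->gart.table_addr,
			       amdgpu_bo_size(adev->gart.robj),
			       adev->gart.robj->tbo.resv, &fence);
	if (r) {
		amdgpu_bo_unreserve(adev->gart.robj->shadow);
		return r;
	}
	amdgpu_bo_fence(adev->gart.robj, fence, true);
	amdgpu_bo_unreserve(adev->gart.robj->shadow);
	/* uninterruptible: this would run from the reset path */
	r = fence_wait(fence, false);
	fence_put(fence);
	return r;
}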