Every VM uses its own recover entity to recover the page table from its shadow.
@@ -784,7 +784,7 @@ int amdgpu_vm_recover_page_table_from_shadow(struct amdgpu_device *adev, if (!bo || !bo_shadow) continue; - r = amdgpu_vm_recover_bo_from_shadow(adev, bo, bo_shadow, + r = amdgpu_vm_recover_bo_from_shadow(adev, vm, bo, bo_shadow, NULL, &fence); if (r) { DRM_ERROR("recover page table failed!\n"); @@ -1678,12 +1678,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring); ring_instance %= adev->vm_manager.vm_pte_num_rings; ring = adev->vm_manager.vm_pte_rings[ring_instance]; + rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_RECOVER]; + r = amd_sched_entity_init(&ring->sched, &vm->recover_entity, + rq, amdgpu_sched_jobs); + if (r) + goto err; rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL]; r = amd_sched_entity_init(&ring->sched, &vm->entity, rq, amdgpu_sched_jobs); if (r) - goto err; - + goto err1; + vm->ring = ring; vm->page_directory_fence = NULL; r = amdgpu_bo_create(adev, pd_size, align, true, @@ -1725,6 +1730,8 @@ error_free_page_directory: error_free_sched_entity: amd_sched_entity_fini(&ring->sched, &vm->entity); +err1: + amd_sched_entity_fini(&ring->sched, &vm->recover_entity); err: drm_free_large(vm->page_tables); -- 1.9.1