From: Christian König <christian.koenig@xxxxxxx>

Allows us to get the size for all levels as well.

Change-Id: Iaf2f9b2bf19c3623018a2215f8cf01a61bdbe8ea
Signed-off-by: Christian König <christian.koenig at amd.com>
Reviewed-by: Alex Deucher <alexander.deucher at amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 34 ++++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9172954..90494ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -76,27 +76,37 @@ struct amdgpu_pte_update_params {
 };
 
 /**
- * amdgpu_vm_num_pde - return the number of page directory entries
+ * amdgpu_vm_num_entries - return the number of entries in a PD/PT
  *
  * @adev: amdgpu_device pointer
  *
- * Calculate the number of page directory entries.
+ * Calculate the number of entries in a page directory or page table.
  */
-static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
+static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
+				      unsigned level)
 {
-	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
+	if (level == 0)
+		/* For the root directory */
+		return adev->vm_manager.max_pfn >>
+			(amdgpu_vm_block_size * adev->vm_manager.num_level);
+	else if (level == adev->vm_manager.num_level)
+		/* For the page tables on the leaves */
+		return AMDGPU_VM_PTE_COUNT;
+	else
+		/* Everything in between */
+		return 1 << amdgpu_vm_block_size;
 }
 
 /**
- * amdgpu_vm_directory_size - returns the size of the page directory in bytes
+ * amdgpu_vm_bo_size - returns the size of the BOs in bytes
  *
  * @adev: amdgpu_device pointer
  *
- * Calculate the size of the page directory in bytes.
+ * Calculate the size of the BO for a page directory or page table in bytes.
  */
-static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
+static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
 {
-	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
+	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_entries(adev, level) * 8);
 }
 
 /**
@@ -1393,7 +1403,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 	saddr >>= amdgpu_vm_block_size;
 	eaddr >>= amdgpu_vm_block_size;
 
-	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
+	BUG_ON(eaddr >= amdgpu_vm_num_entries(adev, 0));
 
 	if (eaddr > vm->root.last_entry_used)
 		vm->root.last_entry_used = eaddr;
@@ -1576,8 +1586,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
 
-	pd_size = amdgpu_vm_directory_size(adev);
-	pd_entries = amdgpu_vm_num_pdes(adev);
+	pd_size = amdgpu_vm_bo_size(adev, 0);
+	pd_entries = amdgpu_vm_num_entries(adev, 0);
 
 	/* allocate page table array */
 	vm->root.entries = drm_calloc_large(pd_entries,
@@ -1662,7 +1672,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 			kfree(mapping);
 	}
 
-	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
+	for (i = 0; i < amdgpu_vm_num_entries(adev, 0); i++) {
 		struct amdgpu_bo *pt = vm->root.entries[i].bo;
 
 		if (!pt)
-- 
1.9.1
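
For anyone skimming the math rather than the driver tree: below is a minimal
userspace sketch of the per-level entry counts that the new
amdgpu_vm_num_entries() computes. The block size, level count and max_pfn used
here are assumed example values (9 address bits per level, 48-bit address
space with 4 KiB pages), not values taken from the patch or the hardware.

#include <stdio.h>

/* Assumed example parameters, not the driver's actual configuration. */
#define BLOCK_SIZE	9			/* address bits decoded per level */
#define NUM_LEVELS	3			/* levels below the root directory */
#define PTE_COUNT	(1u << BLOCK_SIZE)	/* entries in a leaf page table */
#define MAX_PFN		(1ull << 36)		/* 48-bit VA space, 4 KiB pages */

static unsigned num_entries(unsigned level)
{
	if (level == 0)
		/* root directory covers whatever the lower levels don't */
		return MAX_PFN >> (BLOCK_SIZE * NUM_LEVELS);
	else if (level == NUM_LEVELS)
		/* leaf page tables have a fixed entry count */
		return PTE_COUNT;
	else
		/* intermediate directories decode BLOCK_SIZE bits each */
		return 1u << BLOCK_SIZE;
}

int main(void)
{
	unsigned level;

	for (level = 0; level <= NUM_LEVELS; level++)
		printf("level %u: %u entries, %u byte BO (before page alignment)\n",
		       level, num_entries(level), num_entries(level) * 8);
	return 0;
}

With these example numbers every level ends up with 512 entries, i.e. 4 KiB
per BO before the AMDGPU_GPU_PAGE_ALIGN() rounding that amdgpu_vm_bo_size()
applies.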