[PATCH 04/18] drm/amdgpu: generalize page table level

From: Christian König <christian.koenig@amd.com>

No functional change, but this is the basis for multi-level page tables.
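
To make the rename easier to follow, here is a minimal standalone sketch (stub
types, not the driver code) of the data-structure change: the page directory
stops being a special-cased vm->page_directory pointer and becomes the root
struct amdgpu_vm_pt, which now carries its own array of child entries. The
recursive teardown shown is an assumption about where later patches in the
series take this; the patch below still handles only a single level.

	#include <stdio.h>
	#include <stdlib.h>

	struct bo { int placeholder; };	/* stand-in for struct amdgpu_bo */

	struct vm_pt {			/* mirrors struct amdgpu_vm_pt */
		struct bo		*bo;
		unsigned long long	addr;
		struct vm_pt		*entries; /* one per directory entry */
		unsigned		last_entry_used;
	};

	struct vm {		/* was: *page_directory + *page_tables */
		struct vm_pt	root;
	};

	/* One struct per level lets teardown become a single recursive walk. */
	static void free_level(struct vm_pt *pt)
	{
		unsigned i;

		if (pt->entries) {
			for (i = 0; i <= pt->last_entry_used; ++i)
				free_level(&pt->entries[i]);
			free(pt->entries);
		}
		free(pt->bo);
	}

	int main(void)
	{
		struct vm vm = { .root = { .bo = calloc(1, sizeof(struct bo)) } };

		vm.root.last_entry_used = 1;
		vm.root.entries = calloc(2, sizeof(struct vm_pt));
		vm.root.entries[0].bo = calloc(1, sizeof(struct bo));
		vm.root.entries[1].bo = calloc(1, sizeof(struct bo));

		free_level(&vm.root);
		printf("two-level table torn down with one recursive walk\n");
		return 0;
	}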

Change-Id: If5729be07e15cc8618ae7bce15c6b27aa4f24393
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 87 +++++++++++++++++-----------------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |  9 ++--
 3 files changed, 50 insertions(+), 48 deletions(-)
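
One illustrative note before the hunks: the addressing scheme itself is
untouched; pt_idx = addr >> amdgpu_vm_block_size still selects an entry, it
just lives in vm->root.entries[] now. A toy demonstration follows, where the
block size of 9 (512 PTEs per table) is an assumed example value; in the
driver it comes from the amdgpu_vm_block_size variable used in the hunks
below.

	#include <stdio.h>

	#define VM_BLOCK_SIZE 9	/* assumed example: 512 PTEs per table */

	int main(void)
	{
		unsigned long long addr;	/* GPU address, in pages */

		for (addr = 0; addr < 3ULL << VM_BLOCK_SIZE;
		     addr += 1ULL << VM_BLOCK_SIZE) {
			unsigned pt_idx = (unsigned)(addr >> VM_BLOCK_SIZE);

			/* the driver reads vm->root.entries[pt_idx].bo here */
			printf("page address 0x%llx -> root.entries[%u]\n",
			       addr, pt_idx);
		}
		return 0;
	}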

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0e5d851..d9308cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -873,7 +873,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 	}
 
 	if (p->job->vm) {
-		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
 
 		r = amdgpu_bo_vm_update_pte(p, vm);
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 1f27300..9172954 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -113,9 +113,9 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 			 struct list_head *validated,
 			 struct amdgpu_bo_list_entry *entry)
 {
-	entry->robj = vm->page_directory;
+	entry->robj = vm->root.bo;
 	entry->priority = 0;
-	entry->tv.bo = &vm->page_directory->tbo;
+	entry->tv.bo = &entry->robj->tbo;
 	entry->tv.shared = true;
 	entry->user_pages = NULL;
 	list_add(&entry->tv.head, validated);
@@ -147,8 +147,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		return 0;
 
 	/* add the vm page table to the list */
-	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo *bo = vm->page_tables[i].bo;
+	for (i = 0; i <= vm->root.last_entry_used; ++i) {
+		struct amdgpu_bo *bo = vm->root.entries[i].bo;
 
 		if (!bo)
 			continue;
@@ -176,8 +176,8 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 	unsigned i;
 
 	spin_lock(&glob->lru_lock);
-	for (i = 0; i <= vm->max_pde_used; ++i) {
-		struct amdgpu_bo *bo = vm->page_tables[i].bo;
+	for (i = 0; i <= vm->root.last_entry_used; ++i) {
+		struct amdgpu_bo *bo = vm->root.entries[i].bo;
 
 		if (!bo)
 			continue;
@@ -597,15 +597,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	int r;
 
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
-	shadow = vm->page_directory->shadow;
+	shadow = vm->root.bo->shadow;
 
 	/* padding, etc. */
 	ndw = 64;
 
 	/* assume the worst case */
-	ndw += vm->max_pde_used * 6;
+	ndw += vm->root.last_entry_used * 6;
 
-	pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
+	pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
 	if (shadow) {
 		r = amdgpu_ttm_bind(&shadow->tbo, &shadow->tbo.mem);
 		if (r)
@@ -625,8 +625,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	params.ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
-	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
-		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
+	for (pt_idx = 0; pt_idx <= vm->root.last_entry_used; ++pt_idx) {
+		struct amdgpu_bo *bo = vm->root.entries[pt_idx].bo;
 		uint64_t pde, pt;
 
 		if (bo == NULL)
@@ -642,10 +642,10 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		}
 
 		pt = amdgpu_bo_gpu_offset(bo);
-		if (vm->page_tables[pt_idx].addr == pt)
+		if (vm->root.entries[pt_idx].addr == pt)
 			continue;
 
-		vm->page_tables[pt_idx].addr = pt;
+		vm->root.entries[pt_idx].addr = pt;
 
 		pde = pd_addr + pt_idx * 8;
 		if (((last_pde + 8 * count) != pde) ||
@@ -680,7 +680,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (count) {
 		uint64_t pt_addr = amdgpu_vm_adjust_mc_addr(adev, last_pt);
 
-		if (vm->page_directory->shadow)
+		if (vm->root.bo->shadow)
 			amdgpu_vm_do_set_ptes(&params, last_shadow, pt_addr,
 					      count, incr, AMDGPU_PTE_VALID);
 
@@ -694,7 +694,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	}
 
 	amdgpu_ring_pad_ib(ring, params.ib);
-	amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+	amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
 			 AMDGPU_FENCE_OWNER_VM);
 	if (shadow)
 		amdgpu_sync_resv(adev, &job->sync, shadow->tbo.resv,
@@ -706,7 +706,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	amdgpu_bo_fence(vm->page_directory, fence, true);
+	amdgpu_bo_fence(vm->root.bo, fence, true);
 	fence_put(vm->last_dir_update);
 	vm->last_dir_update = fence_get(fence);
 	fence_put(fence);
@@ -746,7 +746,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* initialize the variables */
 	addr = start;
 	pt_idx = addr >> amdgpu_vm_block_size;
-	pt = params->vm->page_tables[pt_idx].bo;
+	pt = params->vm->root.entries[pt_idx].bo;
 	if (params->shadow) {
 		if (!pt->shadow)
 			return;
@@ -769,7 +769,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 	/* walk over the address space and update the page tables */
 	while (addr < end) {
 		pt_idx = addr >> amdgpu_vm_block_size;
-		pt = params->vm->page_tables[pt_idx].bo;
+		pt = params->vm->root.entries[pt_idx].bo;
 		if (params->shadow) {
 			if (!pt->shadow)
 				return;
@@ -983,12 +983,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
+	r = amdgpu_sync_resv(adev, &job->sync, vm->root.bo->tbo.resv,
 			     owner);
 	if (r)
 		goto error_free;
 
-	r = reservation_object_reserve_shared(vm->page_directory->tbo.resv);
+	r = reservation_object_reserve_shared(vm->root.bo->tbo.resv);
 	if (r)
 		goto error_free;
 
@@ -1004,7 +1004,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	amdgpu_bo_fence(vm->page_directory, f, true);
+	amdgpu_bo_fence(vm->root.bo, f, true);
 	if (fence) {
 		fence_put(*fence);
 		*fence = fence_get(f);
@@ -1395,15 +1395,15 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 
 	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));
 
-	if (eaddr > vm->max_pde_used)
-		vm->max_pde_used = eaddr;
+	if (eaddr > vm->root.last_entry_used)
+		vm->root.last_entry_used = eaddr;
 
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
-		struct reservation_object *resv = vm->page_directory->tbo.resv;
+		struct reservation_object *resv = vm->root.bo->tbo.resv;
 		struct amdgpu_bo *pt;
 
-		if (vm->page_tables[pt_idx].bo)
+		if (vm->root.entries[pt_idx].bo)
 			continue;
 
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
@@ -1420,10 +1420,10 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		/* Keep a reference to the page table to avoid freeing
 		 * them up in the wrong order.
 		 */
-		pt->parent = amdgpu_bo_ref(vm->page_directory);
+		pt->parent = amdgpu_bo_ref(vm->root.bo);
 
-		vm->page_tables[pt_idx].bo = pt;
-		vm->page_tables[pt_idx].addr = 0;
+		vm->root.entries[pt_idx].bo = pt;
+		vm->root.entries[pt_idx].addr = 0;
 	}
 
 	return 0;
@@ -1580,8 +1580,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	pd_entries = amdgpu_vm_num_pdes(adev);
 
 	/* allocate page table array */
-	vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
-	if (vm->page_tables == NULL) {
+	vm->root.entries = drm_calloc_large(pd_entries,
+					    sizeof(struct amdgpu_vm_pt));
+	if (vm->root.entries == NULL) {
 		DRM_ERROR("Cannot allocate memory for page table array\n");
 		return -ENOMEM;
 	}
@@ -1605,29 +1606,29 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 			     AMDGPU_GEM_CREATE_SHADOW |
 			     AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 			     AMDGPU_GEM_CREATE_VRAM_CLEARED,
-			     NULL, NULL, &vm->page_directory);
+			     NULL, NULL, &vm->root.bo);
 	if (r)
 		goto error_free_sched_entity;
 
-	r = amdgpu_bo_reserve(vm->page_directory, false);
+	r = amdgpu_bo_reserve(vm->root.bo, false);
 	if (r)
-		goto error_free_page_directory;
+		goto error_free_root;
 
 	vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
-	amdgpu_bo_unreserve(vm->page_directory);
+	amdgpu_bo_unreserve(vm->root.bo);
 
 	return 0;
 
-error_free_page_directory:
-	amdgpu_bo_unref(&vm->page_directory->shadow);
-	amdgpu_bo_unref(&vm->page_directory);
-	vm->page_directory = NULL;
+error_free_root:
+	amdgpu_bo_unref(&vm->root.bo->shadow);
+	amdgpu_bo_unref(&vm->root.bo);
+	vm->root.bo = NULL;
 
 error_free_sched_entity:
 	amd_sched_entity_fini(&ring->sched, &vm->entity);
 
 err:
-	drm_free_large(vm->page_tables);
+	drm_free_large(vm->root.entries);
 
 	return r;
 }
@@ -1662,7 +1663,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
-		struct amdgpu_bo *pt = vm->page_tables[i].bo;
+		struct amdgpu_bo *pt = vm->root.entries[i].bo;
 
 		if (!pt)
 			continue;
@@ -1670,10 +1671,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		amdgpu_bo_unref(&pt->shadow);
 		amdgpu_bo_unref(&pt);
 	}
-	drm_free_large(vm->page_tables);
+	drm_free_large(vm->root.entries);
 
-	amdgpu_bo_unref(&vm->page_directory->shadow);
-	amdgpu_bo_unref(&vm->page_directory);
+	amdgpu_bo_unref(&vm->root.bo->shadow);
+	amdgpu_bo_unref(&vm->root.bo);
 	fence_put(vm->last_dir_update);
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index e208186f..1f54563 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -76,6 +76,10 @@
 struct amdgpu_vm_pt {
 	struct amdgpu_bo	*bo;
 	uint64_t		addr;
+
+	/* array of page tables, one for each directory entry */
+	struct amdgpu_vm_pt	*entries;
+	unsigned		last_entry_used;
 };
 
 struct amdgpu_vm {
@@ -95,13 +99,10 @@ struct amdgpu_vm {
 	struct list_head	freed;
 
 	/* contains the page directory */
-	struct amdgpu_bo	*page_directory;
-	unsigned		max_pde_used;
+	struct amdgpu_vm_pt     root;
 	struct fence		*last_dir_update;
 	uint64_t		last_eviction_counter;
 
-	/* array of page tables, one for each page directory entry */
-	struct amdgpu_vm_pt	*page_tables;
 
 	/* for id and flush management per ring */
 	struct amdgpu_vm_id	*ids[AMDGPU_MAX_RINGS];
-- 
1.9.1


