From: Harish Kasiviswanathan <Harish.Kasiviswanathan@xxxxxxx>

Bundle some VM table update parameters into the new
amdgpu_vm_update_params structure so that the number of function
parameters can be reduced. This is a structural change only; there is
no change in logic.

v2: agd: squash in fix from Harish

Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@xxxxxxx>
Reviewed-by: Christian König <christian.koenig@xxxxxxx>
Signed-off-by: Alex Deucher <alexander.deucher@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 111 ++++++++++++++++++---------------
 1 file changed, 62 insertions(+), 49 deletions(-)
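Editor's note (placed below the "---" marker, so git-am ignores it):
this patch is the classic "parameter object" refactoring. For readers
unfamiliar with the pattern, here is a minimal, standalone C sketch of
the same shape. struct update_params, update_pages() and the printf
bodies are invented stand-ins for illustration only; the real structure
and call sites are in the diff below.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented stand-in for amdgpu_vm_update_params: the arguments that
 * always travel together are bundled into one object. */
struct update_params {
	uint64_t src;		/* copy-source address, 0 when unused */
	const uint64_t *pages;	/* per-page address table, NULL when unused */
};

/* Was: update_pages(src, pages, pe, addr, count) -- five parameters.
 * Now one pointer carries the bundled state, as in the patch. */
static void update_pages(const struct update_params *p,
			 uint64_t pe, uint64_t addr, unsigned count)
{
	if (p->src)
		printf("copy %u entries from 0x%llx to 0x%llx\n", count,
		       (unsigned long long)(p->src + (addr >> 12) * 8),
		       (unsigned long long)pe);
	else if (p->pages)
		printf("write %u entries via the address table\n", count);
	else
		printf("set %u entries directly at 0x%llx\n", count,
		       (unsigned long long)pe);
}

int main(void)
{
	struct update_params params;

	/* Mirrors the patch: zero everything, then fill only the
	 * fields this path actually uses. */
	memset(&params, 0, sizeof(params));
	params.src = 0x100000;

	update_pages(&params, 0x2000, 0x42000, 4);
	return 0;
}

As in the patch, the object is zeroed with memset() and only the
fields a given path needs are filled in, so each callee can test the
source/table fields to pick the right update method.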
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ea708cb..9f36ed3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,6 +53,18 @@
 /* Special value that no flush is necessary */
 #define AMDGPU_VM_NO_FLUSH (~0ll)
 
+/* Local structure. Encapsulate some VM table update parameters to reduce
+ * the number of function parameters
+ */
+struct amdgpu_vm_update_params {
+	/* address where to copy page table entries from */
+	uint64_t src;
+	/* DMA addresses to use for mapping */
+	dma_addr_t *pages_addr;
+	/* indirect buffer to fill with commands */
+	struct amdgpu_ib *ib;
+};
+
 /**
  * amdgpu_vm_num_pde - return the number of page directory entries
  *
@@ -389,9 +401,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * amdgpu_vm_update_pages - helper to call the right asic function
  *
  * @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @ib: indirect buffer to fill with commands
+ * @vm_update_params: see amdgpu_vm_update_params definition
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -402,29 +412,29 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
  * to setup the page table using the DMA.
  */
 static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
-				   uint64_t src,
-				   dma_addr_t *pages_addr,
-				   struct amdgpu_ib *ib,
+				   struct amdgpu_vm_update_params
+					*vm_update_params,
 				   uint64_t pe, uint64_t addr,
 				   unsigned count, uint32_t incr,
 				   uint32_t flags)
 {
 	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
 
-	if (src) {
-		src += (addr >> 12) * 8;
-		amdgpu_vm_copy_pte(adev, ib, pe, src, count);
+	if (vm_update_params->src) {
+		amdgpu_vm_copy_pte(adev, vm_update_params->ib,
+			pe, (vm_update_params->src + (addr >> 12) * 8), count);
 
-	} else if (pages_addr) {
-		amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
-				    count, incr, flags);
+	} else if (vm_update_params->pages_addr) {
+		amdgpu_vm_write_pte(adev, vm_update_params->ib,
+			vm_update_params->pages_addr,
+			pe, addr, count, incr, flags);
 
 	} else if (count < 3) {
-		amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+		amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
 				    count, incr, flags);
 
 	} else {
-		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
+		amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
 				      count, incr, flags);
 	}
 }
@@ -444,10 +454,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	struct amdgpu_ring *ring;
 	struct fence *fence = NULL;
 	struct amdgpu_job *job;
+	struct amdgpu_vm_update_params vm_update_params;
 	unsigned entries;
 	uint64_t addr;
 	int r;
 
+	memset(&vm_update_params, 0, sizeof(vm_update_params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -465,7 +477,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (r)
 		goto error;
 
-	amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
+	vm_update_params.ib = &job->ibs[0];
+	amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
 			       0, 0);
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
@@ -538,11 +551,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	uint64_t last_pde = ~0, last_pt = ~0;
 	unsigned count = 0, pt_idx, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_ib *ib;
+	struct amdgpu_vm_update_params vm_update_params;
 	struct fence *fence = NULL;
 
 	int r;
 
+	memset(&vm_update_params, 0, sizeof(vm_update_params));
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
 
 	/* padding, etc. */
@@ -555,7 +569,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	ib = &job->ibs[0];
+	vm_update_params.ib = &job->ibs[0];
 
 	/* walk over the address space and update the page directory */
 	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -575,7 +589,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		    ((last_pt + incr * count) != pt)) {
 
 			if (count) {
-				amdgpu_vm_update_pages(adev, 0, NULL, ib,
+				amdgpu_vm_update_pages(adev, &vm_update_params,
 						       last_pde, last_pt,
 						       count, incr,
 						       AMDGPU_PTE_VALID);
@@ -590,14 +604,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 	}
 
 	if (count)
-		amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
-				       count, incr, AMDGPU_PTE_VALID);
+		amdgpu_vm_update_pages(adev, &vm_update_params,
+					last_pde, last_pt,
+					count, incr, AMDGPU_PTE_VALID);
 
-	if (ib->length_dw != 0) {
-		amdgpu_ring_pad_ib(ring, ib);
+	if (vm_update_params.ib->length_dw != 0) {
+		amdgpu_ring_pad_ib(ring, vm_update_params.ib);
 		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
-		WARN_ON(ib->length_dw > ndw);
+		WARN_ON(vm_update_params.ib->length_dw > ndw);
 		r = amdgpu_job_submit(job, ring, &vm->entity,
 				      AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
@@ -623,18 +638,15 @@ error_free:
  * amdgpu_vm_frag_ptes - add fragment information to PTEs
  *
  * @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @ib: IB for the update
+ * @vm_update_params: see amdgpu_vm_update_params definition
  * @pe_start: first PTE to handle
  * @pe_end: last PTE to handle
  * @addr: addr those PTEs should point to
  * @flags: hw mapping flags
  */
 static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
-				uint64_t src,
-				dma_addr_t *pages_addr,
-				struct amdgpu_ib *ib,
+				struct amdgpu_vm_update_params
+					*vm_update_params,
 				uint64_t pe_start, uint64_t pe_end,
 				uint64_t addr, uint32_t flags)
 {
@@ -671,11 +683,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 		return;
 
 	/* system pages are non continuously */
-	if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
-	    (frag_start >= frag_end)) {
+	if (vm_update_params->src || vm_update_params->pages_addr ||
+		!(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
 
 		count = (pe_end - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
+		amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
 				       addr, count, AMDGPU_GPU_PAGE_SIZE,
 				       flags);
 		return;
@@ -684,21 +696,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
 	/* handle the 4K area at the beginning */
 	if (pe_start != frag_start) {
 		count = (frag_start - pe_start) / 8;
-		amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
+		amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 	}
 
 	/* handle the area in the middle */
 	count = (frag_end - frag_start) / 8;
-	amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
+	amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
 			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
 
 	/* handle the 4K area at the end */
 	if (frag_end != pe_end) {
 		addr += AMDGPU_GPU_PAGE_SIZE * count;
 		count = (pe_end - frag_end) / 8;
-		amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
+		amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
 				       count, AMDGPU_GPU_PAGE_SIZE, flags);
 	}
 }
@@ -707,8 +719,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  * amdgpu_vm_update_ptes - make sure that page tables are valid
  *
  * @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
+ * @vm_update_params: see amdgpu_vm_update_params definition
  * @vm: requested vm
  * @start: start of GPU address range
  * @end: end of GPU address range
@@ -718,10 +729,9 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
  * Update the page tables in the range @start - @end.
  */
 static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
-				  uint64_t src,
-				  dma_addr_t *pages_addr,
+				  struct amdgpu_vm_update_params
+					*vm_update_params,
 				  struct amdgpu_vm *vm,
-				  struct amdgpu_ib *ib,
 				  uint64_t start, uint64_t end,
 				  uint64_t dst, uint32_t flags)
 {
@@ -747,7 +757,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 
 		if (last_pe_end != pe_start) {
 
-			amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
+			amdgpu_vm_frag_ptes(adev, vm_update_params,
 					    last_pe_start, last_pe_end,
 					    last_dst, flags);
 
@@ -762,7 +772,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
 		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
 	}
 
-	amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
+	amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
 			    last_pe_end, last_dst, flags);
 }
 
@@ -794,11 +804,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	void *owner = AMDGPU_FENCE_OWNER_VM;
 	unsigned nptes, ncmds, ndw;
 	struct amdgpu_job *job;
-	struct amdgpu_ib *ib;
+	struct amdgpu_vm_update_params vm_update_params;
 	struct fence *f = NULL;
 	int r;
 
 	ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+	memset(&vm_update_params, 0, sizeof(vm_update_params));
+	vm_update_params.src = src;
+	vm_update_params.pages_addr = pages_addr;
 
 	/* sync to everything on unmapping */
 	if (!(flags & AMDGPU_PTE_VALID))
@@ -815,11 +828,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	/* padding, etc. */
 	ndw = 64;
 
-	if (src) {
+	if (vm_update_params.src) {
 		/* only copy commands needed */
 		ndw += ncmds * 7;
 
-	} else if (pages_addr) {
+	} else if (vm_update_params.pages_addr) {
 		/* header for write data commands */
 		ndw += ncmds * 4;
 
@@ -838,7 +851,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		return r;
 
-	ib = &job->ibs[0];
+	vm_update_params.ib = &job->ibs[0];
 
 	r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
 			     owner);
@@ -849,11 +862,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 	if (r)
 		goto error_free;
 
-	amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
+	amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
 			      last + 1, addr, flags);
 
-	amdgpu_ring_pad_ib(ring, ib);
-	WARN_ON(ib->length_dw > ndw);
+	amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+	WARN_ON(vm_update_params.ib->length_dw > ndw);
 	r = amdgpu_job_submit(job, ring, &vm->entity,
 			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
-- 
2.5.5