On 2023-12-13 19:24, Xiaogang.Chen wrote:
> From: Xiaogang Chen <xiaogang.chen@xxxxxxx>
>
> v2: -not need calculate vram page number for new registered svm range, only do it for split vram pages.
> v3: -use dma address to calculate vram page number of split svm range; use migrate_vma from hmm to calculate page number that migrate to vram.
> v4: -combine calculating of vram page number of split svm range and page dma address copy in same loop if original svm range includes vram pages.

Please move the version changes after the patch description.
> SVM uses hmm page walk to valid buffer before map to gpu vm. After have
> partial migration/mapping do validation on same vm range as migration/map
> do instead of whole svm range that can be very large. This change is
> expected to improve svm code performance.
>
> Signed-off-by: Xiaogang Chen <Xiaogang.Chen@xxxxxxx>
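(For readers skimming the diff: the core of the change is in
svm_range_validate_and_map(), quoted near the end, where the hmm walk is now
bounded by the sub-range actually being migrated/mapped instead of the whole
prange. A minimal sketch of the idea, using map_start/map_last from the patch,
both page numbers:

	/* validate only the pages the fault actually touched */
	start = map_start << PAGE_SHIFT;
	end = (map_last + 1) << PAGE_SHIFT;
	for (addr = start; !r && addr < end; ) {
		/* hmm walk + dma map + gpu map, one vma chunk at a time */
	}
)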
Thanks for the effort to improve the partial migration performance, with some nitpicks below fixed, this patch is
Reviewed-by: Philip Yang <philip.yang@xxxxxxx>
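As context for the accounting rework quoted below (my summary, not part of the
patch): cpages counts pages collected by migrate_vma_setup(), while the new
mpages counts pages that actually migrated. Since counting successful pages
before migrate_vma_finalize() was incorrect (see note below), the patch derives
mpages afterwards by subtraction:

	/* after migrate_vma_finalize(): pages without MIGRATE_PFN_MIGRATE
	 * did not migrate, so successful = collected - unsuccessful
	 */
	mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);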
> ---
>  drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 35 ++++-------
>  drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 79 +++++++++++-------
>  2 files changed, 48 insertions(+), 66 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> index b854cbf06dce..3fb8e59acfbf 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
> @@ -260,19 +260,6 @@ static void svm_migrate_put_sys_page(unsigned long addr)
>  	put_page(page);
>  }
>
> -static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
> -{
> -	unsigned long cpages = 0;
> -	unsigned long i;
> -
> -	for (i = 0; i < migrate->npages; i++) {
> -		if (migrate->src[i] & MIGRATE_PFN_VALID &&
> -		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
> -			cpages++;
> -	}
> -	return cpages;
> -}
> -
>  static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate)
>  {
>  	unsigned long upages = 0;

This was called incorrectly before migrate_vma_finalize, agree to remove it.

> @@ -402,6 +389,7 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
>  	struct dma_fence *mfence = NULL;
>  	struct migrate_vma migrate = { 0 };
>  	unsigned long cpages = 0;
> +	unsigned long mpages = 0;
>  	dma_addr_t *scratch;
>  	void *buf;
>  	int r = -ENOMEM;
> @@ -450,12 +438,13 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
>  	r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence,
>  				     scratch, ttm_res_offset);
>  	migrate_vma_pages(&migrate);
> -	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
> -		 svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
> -
>  	svm_migrate_copy_done(adev, mfence);
>  	migrate_vma_finalize(&migrate);
>
> +	mpages = cpages - svm_migrate_unsuccessful_pages(&migrate);
> +	pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
> +		 mpages, cpages, migrate.npages);
> +
>  	kfd_smi_event_migration_end(node, p->lead_thread->pid,
>  				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
>  				    0, node->id, trigger);
> @@ -465,12 +454,12 @@ svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
> out_free:
>  	kvfree(buf);
> out:
> -	if (!r && cpages) {
> +	if (!r && mpages) {
>  		pdd = svm_range_get_pdd_by_node(prange, node);
>  		if (pdd)
> -			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
> +			WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
>
> -		return cpages;
> +		return mpages;
>  	}
>  	return r;
>  }
> @@ -498,7 +487,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
>  	struct vm_area_struct *vma;
>  	uint64_t ttm_res_offset;
>  	struct kfd_node *node;
> -	unsigned long cpages = 0;
> +	unsigned long mpages = 0;
>  	long r = 0;
>
>  	if (start_mgr < prange->start || last_mgr > prange->last) {
> @@ -540,15 +529,15 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
>  			pr_debug("failed %ld to migrate\n", r);
>  			break;
>  		} else {
> -			cpages += r;
> +			mpages += r;
>  		}
>  		ttm_res_offset += next - addr;
>  		addr = next;
>  	}
>
> -	if (cpages) {
> +	if (mpages) {
>  		prange->actual_loc = best_loc;
> -		prange->vram_pages = prange->vram_pages + cpages;
> +		prange->vram_pages = prange->vram_pages + mpages;
	prange->vram_pages += mpages;
>  	} else if (!prange->actual_loc) {
>  		/* if no page migrated and all pages from prange are at
>  		 * sys ram drop svm_bo got from svm_range_vram_node_new
> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> index 2834fb351818..61e363e388f8 100644
> --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
> @@ -158,13 +158,12 @@ svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
>  static int
>  svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
>  		      unsigned long offset, unsigned long npages,
> -		      unsigned long *hmm_pfns, uint32_t gpuidx, uint64_t *vram_pages)
> +		      unsigned long *hmm_pfns, uint32_t gpuidx)
>  {
>  	enum dma_data_direction dir = DMA_BIDIRECTIONAL;
>  	dma_addr_t *addr = prange->dma_addr[gpuidx];
>  	struct device *dev = adev->dev;
>  	struct page *page;
> -	uint64_t vram_pages_dev;
>  	int i, r;
>
>  	if (!addr) {
> @@ -174,7 +173,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
>  		prange->dma_addr[gpuidx] = addr;
>  	}
>
> -	vram_pages_dev = 0;
>  	addr += offset;
>  	for (i = 0; i < npages; i++) {
>  		if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
> @@ -184,7 +182,6 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
>  		if (is_zone_device_page(page)) {
>  			struct amdgpu_device *bo_adev = prange->svm_bo->node->adev;
>
> -			vram_pages_dev++;
>  			addr[i] = (hmm_pfns[i] << PAGE_SHIFT) +
>  				   bo_adev->vm_manager.vram_base_offset -
>  				   bo_adev->kfd.pgmap.range.start;
> @@ -201,14 +198,14 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
>  		pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
>  				     addr[i] >> PAGE_SHIFT, page_to_pfn(page));
>  	}
>
> -	*vram_pages = vram_pages_dev;
>  	return 0;
>  }
>
>  static int
>  svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
>  		  unsigned long offset, unsigned long npages,
> -		  unsigned long *hmm_pfns, uint64_t *vram_pages)
> +		  unsigned long *hmm_pfns)
>  {
>  	struct kfd_process *p;
>  	uint32_t gpuidx;
> @@ -227,7 +224,7 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap,
>  		}
>
>  		r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,
> -					  hmm_pfns, gpuidx, vram_pages);
> +					  hmm_pfns, gpuidx);
>  		if (r)
>  			break;
>  	}
> @@ -885,14 +882,28 @@ static void svm_range_debug_dump(struct svm_range_list *svms)
>
>  static void *
>  svm_range_copy_array(void *psrc, size_t size, uint64_t num_elements,
> -		     uint64_t offset)
> +		     uint64_t offset, uint64_t *vram_pages)
>  {
> +	unsigned char *src = (unsigned char *)psrc + offset;
>  	unsigned char *dst;
> +	dma_addr_t *temp;

Move the local variable temp declaration inside the for loop.

>
>  	dst = kvmalloc_array(num_elements, size, GFP_KERNEL);
>  	if (!dst)
>  		return NULL;
> -	memcpy(dst, (unsigned char *)psrc + offset, num_elements * size);
> +
> +	if (!vram_pages) {
> +		memcpy(dst, src, num_elements * size);
> +		return (void *)dst;
> +	}
> +
> +	*vram_pages = 0;
> +	for (int i = 0; i < num_elements; i++) {

Declaring the loop variable inside the for statement is not used in the kernel
code, and use type uint64_t, not int, for page counts:

	uint64_t i;
	...
	for (i = 0; i < num_elements; i++) {

> +		temp = (dma_addr_t *)(dst + i*size);
> +		*temp = *(dma_addr_t *)(src + i*size);

The size parameter is there to support 32-bit and 64-bit pointer sizes; we can
use a type cast here instead (equivalent because every caller passes
size = sizeof(dma_addr_t) when vram_pages is requested):

		dma_addr_t *temp;

		temp = (dma_addr_t *)dst + i;
		*temp = *((dma_addr_t *)src + i);
		if (*temp & SVM_RANGE_VRAM_DOMAIN)
			(*vram_pages)++;

> +		if (*temp & SVM_RANGE_VRAM_DOMAIN)
> +			(*vram_pages)++;
> +	}
>
>  	return (void *)dst;
>  }
> @@ -906,7 +917,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
>  		if (!src->dma_addr[i])
>  			continue;
>  		dst->dma_addr[i] = svm_range_copy_array(src->dma_addr[i],
> -					sizeof(*src->dma_addr[i]), src->npages, 0);
> +					sizeof(*src->dma_addr[i]), src->npages, 0, NULL);
>  		if (!dst->dma_addr[i])
>  			return -ENOMEM;
>  	}
> @@ -917,7 +928,7 @@ svm_range_copy_dma_addrs(struct svm_range *dst, struct svm_range *src)
>  static int
>  svm_range_split_array(void *ppnew, void *ppold, size_t size,
>  		      uint64_t old_start, uint64_t old_n,
> -		      uint64_t new_start, uint64_t new_n)
> +		      uint64_t new_start, uint64_t new_n, uint64_t *new_vram_pages)
>  {
>  	unsigned char *new, *old, *pold;
>  	uint64_t d;
> @@ -929,11 +940,12 @@ svm_range_split_array(void *ppnew, void *ppold, size_t size,
>  		return 0;
>
>  	d = (new_start - old_start) * size;
> -	new = svm_range_copy_array(pold, size, new_n, d);
> +	/* get dma addr array for new range and calculte its vram page number */
> +	new = svm_range_copy_array(pold, size, new_n, d, new_vram_pages);
>  	if (!new)
>  		return -ENOMEM;
>  	d = (new_start == old_start) ? new_n * size : 0;
> -	old = svm_range_copy_array(pold, size, old_n, d);
> +	old = svm_range_copy_array(pold, size, old_n, d, NULL);
>  	if (!old) {
>  		kvfree(new);
>  		return -ENOMEM;
>  	}
> @@ -955,11 +967,15 @@ svm_range_split_pages(struct svm_range *new, struct svm_range *old,
>  	for (i = 0; i < MAX_GPU_INSTANCE; i++) {
>  		r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i],
>  					  sizeof(*old->dma_addr[i]), old->start,
> -					  npages, new->start, new->npages);
> +					  npages, new->start, new->npages,
> +					  ((old->actual_loc && old->ttm_res)) ?
> +					  &new->vram_pages : NULL);
If old->actual_loc is not 0, then old->ttm_res is not NULL too, so this can be
simplified to:

					  old->actual_loc ?
					  &new->vram_pages : NULL);
>  		if (r)
>  			return r;
>  	}
>
> +	old->vram_pages -= new->vram_pages;

	if (old->actual_loc)
		old->vram_pages -= new->vram_pages;

> +
>  	return 0;
>  }
> @@ -982,11 +998,6 @@ svm_range_split_nodes(struct svm_range *new, struct svm_range *old,
>  	new->svm_bo = svm_range_bo_ref(old->svm_bo);
>  	new->ttm_res = old->ttm_res;
>
> -	/* set new's vram_pages as old range's now, the acurate vram_pages
> -	 * will be updated during mapping
> -	 */
> -	new->vram_pages = min(old->vram_pages, new->npages);
> -
>  	spin_lock(&new->svm_bo->list_lock);
>  	list_add(&new->svm_bo_list, &new->svm_bo->range_list);
>  	spin_unlock(&new->svm_bo->list_lock);
> @@ -1109,7 +1120,7 @@ static int
> svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
>  		     struct list_head *insert_list, struct list_head *remap_list)
>  {
> -	struct svm_range *tail;
> +	struct svm_range *tail = NULL;
>  	int r = svm_range_split(prange, prange->start, new_last, &tail);
>
>  	if (!r) {
> @@ -1124,7 +1135,7 @@ static int
> svm_range_split_head(struct svm_range *prange, uint64_t new_start,
>  		     struct list_head *insert_list, struct list_head *remap_list)
>  {
> -	struct svm_range *head;
> +	struct svm_range *head = NULL;
>  	int r = svm_range_split(prange, new_start, prange->last, &head);
>
>  	if (!r) {
> @@ -1573,7 +1584,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
>  	struct svm_validate_context *ctx;
>  	unsigned long start, end, addr;
>  	struct kfd_process *p;
> -	uint64_t vram_pages;
>  	void *owner;
>  	int32_t idx;
>  	int r = 0;
> @@ -1642,15 +1652,13 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
>  		}
>  	}
>
> -	vram_pages = 0;
> -	start = prange->start << PAGE_SHIFT;
> -	end = (prange->last + 1) << PAGE_SHIFT;
> +	start = map_start << PAGE_SHIFT;
> +	end = (map_last + 1) << PAGE_SHIFT;
>  	for (addr = start; !r && addr < end; ) {
>  		struct hmm_range *hmm_range;
>  		unsigned long map_start_vma;
>  		unsigned long map_last_vma;
>  		struct vm_area_struct *vma;
> -		uint64_t vram_pages_vma;
>  		unsigned long next = 0;
>  		unsigned long offset;
>  		unsigned long npages;
> @@ -1677,13 +1685,11 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
>  		}
>
>  		if (!r) {
> -			offset = (addr - start) >> PAGE_SHIFT;
> +			offset = (addr - (prange->start << PAGE_SHIFT)) >> PAGE_SHIFT;
			offset = (addr >> PAGE_SHIFT) - prange->start;
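Both forms agree because prange->start << PAGE_SHIFT is page aligned, so the
shift can be applied to each term:

	/* (addr - (prange->start << PAGE_SHIFT)) >> PAGE_SHIFT
	 *	== (addr >> PAGE_SHIFT) - prange->start */

The suggested form just avoids re-shifting prange->start on every iteration.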
Regards,
Philip
>  			r = svm_range_dma_map(prange, ctx->bitmap, offset, npages,
> -					      hmm_range->hmm_pfns, &vram_pages_vma);
> +					      hmm_range->hmm_pfns);
>  			if (r)
>  				pr_debug("failed %d to dma map range\n", r);
> -			else
> -				vram_pages += vram_pages_vma;
>  		}
>
>  		svm_range_lock(prange);
> @@ -1716,19 +1722,6 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
>  		addr = next;
>  	}
>
> -	if (addr == end) {
> -		prange->vram_pages = vram_pages;
> -
> -		/* if prange does not include any vram page and it
> -		 * has not released svm_bo drop its svm_bo reference
> -		 * and set its actaul_loc to sys ram
> -		 */
> -		if (!vram_pages && prange->ttm_res) {
> -			prange->actual_loc = 0;
> -			svm_range_vram_node_free(prange);
> -		}
> -	}
> -
>  	svm_range_unreserve_bos(ctx);
>  	if (!r)
>  		prange->validate_timestamp = ktime_get_boottime();