From: Alex Sierra <alex.sierra@xxxxxxx>

Invalid pages can be the result of pages that have already been migrated
due to the copy-on-write procedure, or pages that were never migrated to
VRAM in the first place. This is not an issue anymore, as pranges now
support mixed memory domains (CPU/GPU).

Signed-off-by: Alex Sierra <alex.sierra@xxxxxxx>
Reviewed-by: Felix Kuehling <Felix.Kuehling@xxxxxxx>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 38 +++++++++++-------------
 1 file changed, 18 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index b298aa8dea4d..6fd68528c425 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -419,7 +419,6 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	size_t size;
 	void *buf;
 	int r = -ENOMEM;
-	int retry = 0;
 
 	memset(&migrate, 0, sizeof(migrate));
 	migrate.vma = vma;
@@ -438,7 +437,6 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	migrate.dst = migrate.src + npages;
 	scratch = (dma_addr_t *)(migrate.dst + npages);
 
-retry:
 	r = migrate_vma_setup(&migrate);
 	if (r) {
 		pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
@@ -446,17 +444,9 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		goto out_free;
 	}
 	if (migrate.cpages != npages) {
-		pr_debug("collect 0x%lx/0x%llx pages, retry\n", migrate.cpages,
+		pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
+			 migrate.cpages,
 			 npages);
-		migrate_vma_finalize(&migrate);
-		if (retry++ >= 3) {
-			r = -ENOMEM;
-			pr_debug("failed %d migrate svms 0x%p [0x%lx 0x%lx]\n",
-				 r, prange->svms, prange->start, prange->last);
-			goto out_free;
-		}
-
-		goto retry;
 	}
 
 	if (migrate.cpages) {
@@ -547,9 +537,8 @@ static void svm_migrate_page_free(struct page *page)
 static int
 svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 			struct migrate_vma *migrate, struct dma_fence **mfence,
-			dma_addr_t *scratch)
+			dma_addr_t *scratch, uint64_t npages)
 {
-	uint64_t npages = migrate->cpages;
 	struct device *dev = adev->dev;
 	uint64_t *src;
 	dma_addr_t *dst;
@@ -566,15 +555,23 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 	src = (uint64_t *)(scratch + npages);
 	dst = scratch;
 
-	for (i = 0, j = 0; i < npages; i++, j++, addr += PAGE_SIZE) {
+	for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
 		struct page *spage;
 
 		spage = migrate_pfn_to_page(migrate->src[i]);
-		if (!spage) {
-			pr_debug("failed get spage svms 0x%p [0x%lx 0x%lx]\n",
+		if (!spage || !is_zone_device_page(spage)) {
+			pr_debug("invalid page. Could be in CPU already svms 0x%p [0x%lx 0x%lx]\n",
 				 prange->svms, prange->start, prange->last);
-			r = -ENOMEM;
-			goto out_oom;
+			if (j) {
+				r = svm_migrate_copy_memory_gart(adev, dst + i - j,
+								 src + i - j, j,
+								 FROM_VRAM_TO_RAM,
+								 mfence);
+				if (r)
+					goto out_oom;
+				j = 0;
+			}
+			continue;
 		}
 		src[i] = svm_migrate_addr(adev, spage);
 		if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
@@ -607,6 +604,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 
 		migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
 		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
+		j++;
 	}
 
 	r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j,
@@ -664,7 +662,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 
 	if (migrate.cpages) {
 		r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
-					    scratch);
+					    scratch, npages);
 		migrate_vma_pages(&migrate);
 		svm_migrate_copy_done(adev, mfence);
 		migrate_vma_finalize(&migrate);
-- 
2.31.1
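The core of the change in svm_migrate_copy_to_ram() is an accumulate/flush/skip
pattern: j counts the pages collected since the last flush, and when an invalid
page is found the pending batch is pushed through svm_migrate_copy_memory_gart()
before that page is skipped; the remaining batch is flushed after the loop. The
stand-alone C sketch below only illustrates that flow under simplified
assumptions; flush_batch() and is_valid() are hypothetical stand-ins, not the
kernel API, and the real code also splits batches on non-contiguous VRAM
addresses.

/*
 * Minimal, self-contained sketch of the accumulate/flush/skip pattern used in
 * svm_migrate_copy_to_ram(). flush_batch() and is_valid() are hypothetical
 * stand-ins; the kernel path batches DMA copies via svm_migrate_copy_memory_gart().
 */
#include <stdio.h>

#define NPAGES 8

/* Stand-in for submitting one batch of 'count' pages starting at 'first'. */
static int flush_batch(int first, int count)
{
	if (count)
		printf("copy pages [%d..%d]\n", first, first + count - 1);
	return 0;	/* the kernel path can fail and bail out via out_oom */
}

/* Stand-in for "!spage || !is_zone_device_page(spage)" */
static int is_valid(int page)
{
	return page % 3 != 0;	/* pretend every third page already sits in system RAM */
}

int main(void)
{
	int i, j, r;

	for (i = 0, j = 0; i < NPAGES; i++) {
		if (!is_valid(i)) {
			/* Invalid page: flush what was collected so far, then skip it. */
			r = flush_batch(i - j, j);
			if (r)
				return r;
			j = 0;
			continue;
		}
		j++;	/* page i joins the pending batch */
	}

	/* Flush the tail batch that reaches the end of the range. */
	return flush_batch(i - j, j);
}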