Not all of the migrate.cpages returned by migrate_vma_setup can actually
be migrated, for example non-anonymous pages, or pages that cannot be
migrated because device memory is exhausted. So after migrate_vma_pages
returns, count the pages that were successfully migrated, i.e. those
that have both the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flags set,
and use that count for the return value and the page_in/page_out
counters.

Signed-off-by: Philip Yang <Philip.Yang@xxxxxxx>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 33 +++++++++++++++++++-----
 1 file changed, 26 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 147300a161d0..22ab9c940157 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -268,6 +268,22 @@ static void svm_migrate_put_sys_page(unsigned long addr)
 	put_page(page);
 }
 
+static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
+{
+	unsigned long cpages = 0;
+	unsigned long i;
+
+	for (i = 0; i < migrate->npages; i++) {
+		if (migrate->src[i] & MIGRATE_PFN_VALID &&
+		    migrate->src[i] & MIGRATE_PFN_MIGRATE)
+			cpages++;
+	}
+	if (cpages != migrate->cpages)
+		pr_debug("cpages/migrate.cpages/npages 0x%lx/0x%lx/0x%lx\n",
+			 cpages, migrate->cpages, migrate->npages);
+	return cpages;
+}
+
 static int
 svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			 struct migrate_vma *migrate, struct dma_fence **mfence,
@@ -385,6 +401,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
 	struct migrate_vma migrate;
+	unsigned long cpages = 0;
 	dma_addr_t *scratch;
 	size_t size;
 	void *buf;
@@ -428,6 +445,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 
 	r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
 	migrate_vma_pages(&migrate);
+	cpages = svm_migrate_successful_pages(&migrate);
 
 	svm_migrate_copy_done(adev, mfence);
 	migrate_vma_finalize(&migrate);
@@ -437,12 +455,12 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 out_free:
 	kvfree(buf);
 out:
-	if (!r && migrate.cpages) {
+	if (!r && cpages) {
 		pdd = svm_range_get_pdd_by_adev(prange, adev);
 		if (pdd)
-			WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
+			WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
 
-		return migrate.cpages;
+		return cpages;
 	}
 	return r;
 }
@@ -619,6 +637,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
 	struct migrate_vma migrate;
+	unsigned long cpages = 0;
 	dma_addr_t *scratch;
 	size_t size;
 	void *buf;
@@ -663,6 +682,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 	r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
 				    scratch, npages);
 	migrate_vma_pages(&migrate);
+	cpages = svm_migrate_successful_pages(&migrate);
 	svm_migrate_copy_done(adev, mfence);
 	migrate_vma_finalize(&migrate);
 	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
@@ -670,13 +690,12 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 out_free:
 	kvfree(buf);
 out:
-	if (!r && migrate.cpages) {
+	if (!r && cpages) {
 		pdd = svm_range_get_pdd_by_adev(prange, adev);
 		if (pdd)
-			WRITE_ONCE(pdd->page_out,
-				   pdd->page_out + migrate.cpages);
+			WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
 
-		return migrate.cpages;
+		return cpages;
 	}
 	return r;
 }
-- 
2.17.1
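
For background on why this check works (not part of the patch itself):
each migrate->src entry packs a pfn together with status bits from
include/linux/migrate.h. migrate_vma_setup() only sets
MIGRATE_PFN_MIGRATE on entries it can isolate (so non-anonymous pages
never get it), and migrate_vma_pages() drops the flag again for pages
whose actual migration failed, so only entries that keep both
MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE were really migrated. Below
is a minimal userspace sketch of the same counting logic; the flag
values mirror the kernel header of this era, and the src[] contents are
made up purely for illustration:

#include <stdio.h>

/* Flag values mirror include/linux/migrate.h at the time of this patch;
 * MIGRATE_PFN_SHIFT is where the pfn itself begins in each entry.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_SHIFT	6

int main(void)
{
	/* Hypothetical src array: entry 0 migrated successfully, entry 1
	 * was collected but refused by core mm (MIGRATE_PFN_MIGRATE
	 * cleared), entry 2 never held a valid page at all.
	 */
	unsigned long src[] = {
		(0x1234UL << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID |
			MIGRATE_PFN_MIGRATE,
		(0x1235UL << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID,
		0,
	};
	unsigned long npages = sizeof(src) / sizeof(src[0]);
	unsigned long cpages = 0;
	unsigned long i;

	/* Same test as svm_migrate_successful_pages() in the patch. */
	for (i = 0; i < npages; i++)
		if (src[i] & MIGRATE_PFN_VALID &&
		    src[i] & MIGRATE_PFN_MIGRATE)
			cpages++;

	/* Prints: successfully migrated 0x1 of 0x3 pages */
	printf("successfully migrated 0x%lx of 0x%lx pages\n",
	       cpages, npages);
	return 0;
}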