Add helper function to get process device data structure from adev to
update counters.

Updates to the vm faults, page_in and page_out counters will not be
executed in parallel; use WRITE_ONCE to avoid any form of compiler
optimizations.

Signed-off-by: Philip Yang <Philip.Yang@xxxxxxx>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 14 ++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 24 ++++++++++++++++++++++++
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h     |  2 ++
 3 files changed, 40 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index fd8f544f0de2..45b5349283af 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -413,6 +413,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 			uint64_t end)
 {
 	uint64_t npages = (end - start) >> PAGE_SHIFT;
+	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
 	struct migrate_vma migrate;
 	dma_addr_t *scratch;
@@ -473,6 +474,12 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 out_free:
 	kvfree(buf);
 out:
+	if (!r) {
+		pdd = svm_range_get_pdd_by_adev(prange, adev);
+		if (pdd)
+			WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
+	}
+
 	return r;
 }
 
@@ -629,6 +636,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 		       struct vm_area_struct *vma, uint64_t start, uint64_t end)
 {
 	uint64_t npages = (end - start) >> PAGE_SHIFT;
+	struct kfd_process_device *pdd;
 	struct dma_fence *mfence = NULL;
 	struct migrate_vma migrate;
 	dma_addr_t *scratch;
@@ -678,6 +686,12 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
 out_free:
 	kvfree(buf);
 out:
+	if (!r) {
+		pdd = svm_range_get_pdd_by_adev(prange, adev);
+		if (pdd)
+			WRITE_ONCE(pdd->page_out,
+				   pdd->page_out + migrate.cpages);
+	}
 	return r;
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 5468ea4264c6..f3323328f01f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -564,6 +564,24 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id)
 	return (struct amdgpu_device *)pdd->dev->kgd;
 }
 
+struct kfd_process_device *
+svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev)
+{
+	struct kfd_process *p;
+	int32_t gpu_idx, gpuid;
+	int r;
+
+	p = container_of(prange->svms, struct kfd_process, svms);
+
+	r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx);
+	if (r) {
+		pr_debug("failed to get device id by adev %p\n", adev);
+		return NULL;
+	}
+
+	return kfd_process_device_from_gpuidx(p, gpu_idx);
+}
+
 static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo)
 {
 	struct ttm_operation_ctx ctx = { false, false };
@@ -2315,6 +2333,7 @@ int
 svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 			uint64_t addr)
 {
+	struct kfd_process_device *pdd;
 	struct mm_struct *mm = NULL;
 	struct svm_range_list *svms;
 	struct svm_range *prange;
@@ -2440,6 +2459,11 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 out_unlock_svms:
 	mutex_unlock(&svms->lock);
 	mmap_read_unlock(mm);
+
+	pdd = svm_range_get_pdd_by_adev(prange, adev);
+	if (pdd)
+		WRITE_ONCE(pdd->faults, pdd->faults + 1);
+
 	mmput(mm);
 out:
 	kfd_unref_process(p);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 0c0fc399395e..a9af03994d1a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -174,6 +174,8 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
 			 unsigned long offset, unsigned long npages);
 void svm_range_free_dma_mappings(struct svm_range *prange);
 void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm);
+struct kfd_process_device *
+svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
 
 /* SVM API and HMM page migration work together, device memory type
  * is initialized to not 0 when page migration register device memory.
-- 
2.17.1
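
Not part of the series above, just a minimal sketch of the single-writer counter pattern the commit message relies on: the fault/migration path is the only updater of each per-device counter, so a plain read-modify-write wrapped in WRITE_ONCE() is enough on the writer side, and a concurrent reader (for example a sysfs show function; the reader here is an assumption for illustration, not code from this patch) pairs it with READ_ONCE().

/*
 * Illustrative only -- not from this series. Single-writer counter:
 * the writer is serialized by its caller, readers may run concurrently.
 */
#include <linux/compiler.h>
#include <linux/types.h>

struct example_counters {
	u64 faults;
};

/* writer side: called from one serialized path (e.g. the fault handler) */
static void example_counters_bump(struct example_counters *c)
{
	WRITE_ONCE(c->faults, c->faults + 1);
}

/* reader side: safe to call while the writer is updating */
static u64 example_counters_read(struct example_counters *c)
{
	return READ_ONCE(c->faults);
}

WRITE_ONCE/READ_ONCE only stop the compiler from tearing or caching the access; if two paths could ever bump the same counter concurrently, an atomic64_t would be needed instead.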