[PATCH] drm/amdgpu: use new HMM APIs and helpers v2

HMM provides new APIs and helpers in kernel 5.2-rc1 to simplify the driver
code path. The old HMM APIs are deprecated and will be removed in the future.

The changes in the driver are:

1. Replace hmm_vma_fault with hmm_range_register and hmm_range_fault, which
support a range spanning multiple VMAs; remove the multiple-VMA handling
path and data structures.
2. Replace hmm_vma_range_done with hmm_range_unregister.
3. Use default_flags to avoid pre-filling the pfn array.
4. Use the new hmm_device_entry_* helpers.

v2: retry if hmm_range_fault returns -EAGAIN (a sketch of the new flow,
including the retry, follows below)
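
For reference, here is a minimal sketch of the register/fault/translate flow
the driver switches to. It is illustrative only: the function name, the retry
limit, and the trimmed error handling are not taken from the patch; the HMM
calls themselves are the ones used in the diff below.

/*
 * Sketch of the new HMM flow. Assumes "range" was set up with the
 * driver's flags/values as in amdgpu_hmm_init_range() and that
 * range->pfns points at an array of npages entries.
 */
static int hmm_fault_sketch(struct hmm_range *range, struct mm_struct *mm,
			    unsigned long start, unsigned long npages,
			    struct page **pages)
{
	int retry = 0;
	unsigned long i;
	long r;

	/* Return value not checked here, mirroring the patch below. */
	hmm_range_register(range, mm, start,
			   start + npages * PAGE_SIZE, PAGE_SHIFT);
again:
	/* Wait out any concurrent invalidation before faulting. */
	hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);

	down_read(&mm->mmap_sem);
	r = hmm_range_fault(range, true);	/* true: block on page faults */
	up_read(&mm->mmap_sem);

	if (r == -EAGAIN && retry++ < 16)	/* range was invalidated, retry */
		goto again;
	if (r < 0) {
		hmm_range_unregister(range);
		return r;
	}

	/* Translate device entries into struct page pointers. */
	for (i = 0; i < npages; i++)
		pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);

	return 0;
}

Teardown mirrors amdgpu_ttm_tt_get_user_pages_done() in the diff: check
hmm_range_valid(), then hmm_range_unregister() and free the pfn array.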

Change-Id: I1a00f88459f3b96c9536933e9a17eb1ebaa78113
Signed-off-by: Philip Yang <Philip.Yang@xxxxxxx>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c  |   1 -
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 150 ++++++++++--------------
 2 files changed, 63 insertions(+), 88 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 41ccee49a224..e0bb47ce570b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -519,7 +519,6 @@ void amdgpu_hmm_init_range(struct hmm_range *range)
 		range->flags = hmm_range_flags;
 		range->values = hmm_range_values;
 		range->pfn_shift = PAGE_SHIFT;
-		range->pfns = NULL;
 		INIT_LIST_HEAD(&range->list);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index eb4b85061c7e..f129160939bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -711,8 +711,7 @@ struct amdgpu_ttm_tt {
 	struct task_struct	*usertask;
 	uint32_t		userflags;
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-	struct hmm_range	*ranges;
-	int			nr_ranges;
+	struct hmm_range	*range;
 #endif
 };
 
@@ -725,57 +724,34 @@ struct amdgpu_ttm_tt {
  */
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
 
-/* Support Userptr pages cross max 16 vmas */
-#define MAX_NR_VMAS	(16)
-
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct mm_struct *mm = gtt->usertask->mm;
 	unsigned long start = gtt->userptr;
-	unsigned long end = start + ttm->num_pages * PAGE_SIZE;
-	struct vm_area_struct *vma = NULL, *vmas[MAX_NR_VMAS];
-	struct hmm_range *ranges;
-	unsigned long nr_pages, i;
-	uint64_t *pfns, f;
+	struct vm_area_struct *vma;
+	struct hmm_range *range;
+	unsigned long i;
+	uint64_t *pfns;
+	int retry = 0;
 	int r = 0;
 
 	if (!mm) /* Happens during process shutdown */
 		return -ESRCH;
 
-	down_read(&mm->mmap_sem);
-
-	/* user pages may cross multiple VMAs */
-	gtt->nr_ranges = 0;
-	do {
-		unsigned long vm_start;
-
-		if (gtt->nr_ranges >= MAX_NR_VMAS) {
-			DRM_ERROR("Too many VMAs in userptr range\n");
-			r = -EFAULT;
-			goto out;
-		}
-
-		vm_start = vma ? vma->vm_end : start;
-		vma = find_vma(mm, vm_start);
-		if (unlikely(!vma || vm_start < vma->vm_start)) {
-			r = -EFAULT;
-			goto out;
-		}
-		vmas[gtt->nr_ranges++] = vma;
-	} while (end > vma->vm_end);
-
-	DRM_DEBUG_DRIVER("0x%lx nr_ranges %d pages 0x%lx\n",
-		start, gtt->nr_ranges, ttm->num_pages);
-
+	vma = find_vma(mm, start);
+	if (unlikely(!vma || start < vma->vm_start)) {
+		r = -EFAULT;
+		goto out;
+	}
 	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
-		vmas[0]->vm_file)) {
+		vma->vm_file)) {
 		r = -EPERM;
 		goto out;
 	}
 
-	ranges = kvmalloc_array(gtt->nr_ranges, sizeof(*ranges), GFP_KERNEL);
-	if (unlikely(!ranges)) {
+	range = kzalloc(sizeof(*range), GFP_KERNEL);
+	if (unlikely(!range)) {
 		r = -ENOMEM;
 		goto out;
 	}
@@ -786,61 +762,61 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 		goto out_free_ranges;
 	}
 
-	for (i = 0; i < gtt->nr_ranges; i++)
-		amdgpu_hmm_init_range(&ranges[i]);
-
-	f = ranges[0].flags[HMM_PFN_VALID];
-	f |= amdgpu_ttm_tt_is_readonly(ttm) ?
-				0 : ranges[0].flags[HMM_PFN_WRITE];
-	memset64(pfns, f, ttm->num_pages);
+	amdgpu_hmm_init_range(range);
+	range->default_flags = range->flags[HMM_PFN_VALID];
+	range->default_flags |= amdgpu_ttm_tt_is_readonly(ttm) ?
+				0 : range->flags[HMM_PFN_WRITE];
+	range->pfn_flags_mask = 0;
+	range->pfns = pfns;
+	hmm_range_register(range, mm, start,
+			   start + ttm->num_pages * PAGE_SIZE, PAGE_SHIFT);
 
-	for (nr_pages = 0, i = 0; i < gtt->nr_ranges; i++) {
-		ranges[i].vma = vmas[i];
-		ranges[i].start = max(start, vmas[i]->vm_start);
-		ranges[i].end = min(end, vmas[i]->vm_end);
-		ranges[i].pfns = pfns + nr_pages;
-		nr_pages += (ranges[i].end - ranges[i].start) / PAGE_SIZE;
+again:
+	/*
+	 * Just wait for range to be valid, safe to ignore return value as we
+	 * will use the return value of hmm_range_fault() below under the
+	 * mmap_sem to ascertain the validity of the range.
+	 */
+	hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT);
 
-		r = hmm_vma_fault(&ranges[i], true);
-		if (unlikely(r))
-			break;
-	}
-	if (unlikely(r)) {
-		while (i--)
-			hmm_vma_range_done(&ranges[i]);
+	down_read(&mm->mmap_sem);
 
-		goto out_free_pfns;
-	}
+	r = hmm_range_fault(range, true);
 
 	up_read(&mm->mmap_sem);
 
+	if (unlikely(r < 0)) {
+		if (likely(r == -EAGAIN)) {
+			if (retry++ < 16)
+				goto again;
+			else
+				pr_err("Retry hmm fault too many times\n");
+		}
+
+		goto out_free_pfns;
+	}
+	
 	for (i = 0; i < ttm->num_pages; i++) {
-		pages[i] = hmm_pfn_to_page(&ranges[0], pfns[i]);
-		if (!pages[i]) {
+		pages[i] = hmm_device_entry_to_page(range, pfns[i]);
+		if (unlikely(!pages[i])) {
 			pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
 			       i, pfns[i]);
-			goto out_invalid_pfn;
+			r = -ENOMEM;
+
+			goto out_free_pfns;
 		}
 	}
-	gtt->ranges = ranges;
+	gtt->range = range;
 
 	return 0;
 
 out_free_pfns:
+	hmm_range_unregister(range);
 	kvfree(pfns);
 out_free_ranges:
-	kvfree(ranges);
+	kfree(range);
 out:
-	up_read(&mm->mmap_sem);
-
 	return r;
-
-out_invalid_pfn:
-	for (i = 0; i < gtt->nr_ranges; i++)
-		hmm_vma_range_done(&ranges[i]);
-	kvfree(pfns);
-	kvfree(ranges);
-	return -ENOMEM;
 }
 
 /**
@@ -853,23 +829,23 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	bool r = false;
-	int i;
 
 	if (!gtt || !gtt->userptr)
 		return false;
 
-	DRM_DEBUG_DRIVER("user_pages_done 0x%llx nr_ranges %d pages 0x%lx\n",
-		gtt->userptr, gtt->nr_ranges, ttm->num_pages);
+	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
+		gtt->userptr, ttm->num_pages);
 
-	WARN_ONCE(!gtt->ranges || !gtt->ranges[0].pfns,
+	WARN_ONCE(!gtt->range || !gtt->range->pfns,
 		"No user pages to check\n");
 
-	if (gtt->ranges) {
-		for (i = 0; i < gtt->nr_ranges; i++)
-			r |= hmm_vma_range_done(&gtt->ranges[i]);
-		kvfree(gtt->ranges[0].pfns);
-		kvfree(gtt->ranges);
-		gtt->ranges = NULL;
+	if (gtt->range) {
+		r = hmm_range_valid(gtt->range);
+		hmm_range_unregister(gtt->range);
+
+		kvfree(gtt->range->pfns);
+		kfree(gtt->range);
+		gtt->range = NULL;
 	}
 
 	return r;
@@ -953,9 +929,9 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	sg_free_table(ttm->sg);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-	if (gtt->ranges &&
-	    ttm->pages[0] == hmm_pfn_to_page(&gtt->ranges[0],
-					     gtt->ranges[0].pfns[0]))
+	if (gtt->range &&
+	    ttm->pages[0] == hmm_device_entry_to_page(gtt->range,
+						      gtt->range->pfns[0]))
 		WARN_ONCE(1, "Missing get_user_page_done\n");
 #endif
 }
-- 
2.17.1
