[PATCH 38/44] drm/amdkfd: Simplify split_by_granularity

svm_range_split_by_granularity always added the parent range, and only
the parent range, to the update list for the caller to add to the
deferred work list. So do that in the caller unconditionally and
eliminate the update_list parameter.

Split the range so that the original prange is always the one that
will be migrated. That way we can eliminate the pmigrate parameter
and simplify the code further.
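
For illustration, a minimal userspace sketch of the alignment
arithmetic the new split uses (the macros below are power-of-two
stand-ins for the kernel's ALIGN_DOWN/ALIGN helpers, the values are
hypothetical, and addr is a page index, not a byte address):

	#include <stdio.h>

	/* power-of-two stand-ins for the kernel's ALIGN helpers */
	#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))
	#define ALIGN(x, a)      ALIGN_DOWN((x) + (a) - 1, (a))

	int main(void)
	{
		unsigned long addr = 0x1234;   /* hypothetical faulting page */
		unsigned long size = 1UL << 9; /* granularity 9: 512 pages */
		unsigned long start = ALIGN_DOWN(addr, size);
		unsigned long last = ALIGN(addr + 1, size) - 1;

		/* prange gets trimmed to [start, last]; the pieces
		 * before start and after last become children of the
		 * parent range
		 */
		printf("granule [0x%lx 0x%lx] around page 0x%lx\n",
		       start, last, addr);
		return 0;
	}

This prints "granule [0x1200 0x13ff] around page 0x1234": the original
prange is trimmed to the single aligned granule containing the faulting
page, so it is always the range that gets migrated.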

Update the outdated documentation.

Change-Id: Ifdc8d29b2abda67478e0d41daf5b46b861802ae7
Signed-off-by: Felix Kuehling <Felix.Kuehling@xxxxxxx>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 29 ++++------
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 73 ++++++------------------
 drivers/gpu/drm/amd/amdkfd/kfd_svm.h     |  4 +-
 3 files changed, 30 insertions(+), 76 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index da2ff655812e..5c8b32873086 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -780,12 +780,10 @@ svm_migrate_to_vram(struct svm_range *prange, uint32_t best_loc,
 static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 {
 	unsigned long addr = vmf->address;
-	struct list_head update_list;
-	struct svm_range *pmigrate;
 	struct vm_area_struct *vma;
+	enum svm_work_list_ops op;
 	struct svm_range *parent;
 	struct svm_range *prange;
-	struct svm_range *next;
 	struct kfd_process *p;
 	struct mm_struct *mm;
 	int r = 0;
@@ -816,31 +814,24 @@ static vm_fault_t svm_migrate_to_ram(struct vm_fault *vmf)
 		goto out_unlock_prange;
 
 	svm_range_lock(parent);
-	r = svm_range_split_by_granularity(p, mm, addr, parent, prange,
-					   &pmigrate, &update_list);
+	r = svm_range_split_by_granularity(p, mm, addr, parent, prange);
 	svm_range_unlock(parent);
 	if (r) {
 		pr_debug("failed %d to split range by granularity\n", r);
 		goto out_unlock_prange;
 	}
 
-	r = svm_migrate_vram_to_ram(pmigrate, mm);
+	r = svm_migrate_vram_to_ram(prange, mm);
 	if (r)
 		pr_debug("failed %d migrate 0x%p [0x%lx 0x%lx] to ram\n", r,
-			 pmigrate, pmigrate->start, pmigrate->last);
-
-	list_for_each_entry_safe(prange, next, &update_list, update_list) {
-		enum svm_work_list_ops op;
-
-		/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
-		if (p->xnack_enabled && prange == pmigrate)
-			op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
-		else
-			op = SVM_OP_UPDATE_RANGE_NOTIFIER;
+			 prange, prange->start, prange->last);
 
-		svm_range_add_list_work(&p->svms, prange, mm, op);
-		list_del_init(&prange->update_list);
-	}
+	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
+	if (p->xnack_enabled && parent == prange)
+		op = SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP;
+	else
+		op = SVM_OP_UPDATE_RANGE_NOTIFIER;
+	svm_range_add_list_work(&p->svms, parent, mm, op);
 	schedule_deferred_list_work(&p->svms);
 
 out_unlock_prange:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 3a7030d9f331..fbcb1491e987 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1005,16 +1005,14 @@ void svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
  *
  * @p: the process with svms list
  * @mm: mm structure
+ * @addr: the vm fault address in pages, to split the prange
  * @parent: parent range if prange is from child list
  * @prange: prange to split
- * @addr: the vm fault address in pages, to split the prange
- * @pmigrate: output, the range to be migrated to ram
- * @update_list: output, the ranges to update notifier
  *
- * Collects small ranges that make up one migration granule and splits the first
- * and the last range at the granularity boundary
+ * Trims @prange to be a single aligned block of prange->granularity if
+ * possible. The head and tail are added to the child_list in @parent.
  *
- * Context: caller hold svms lock
+ * Context: caller must hold mmap_read_lock and prange->lock
  *
  * Return:
  * 0 - OK, otherwise error code
@@ -1022,75 +1020,42 @@ void svm_range_add_child(struct svm_range *prange, struct mm_struct *mm,
 int
 svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
 			       unsigned long addr, struct svm_range *parent,
-			       struct svm_range *prange,
-			       struct svm_range **pmigrate,
-			       struct list_head *update_list)
+			       struct svm_range *prange)
 {
-	struct svm_range *tail;
-	struct svm_range *new;
-	unsigned long start;
-	unsigned long last;
-	unsigned long size;
-	int r = 0;
+	struct svm_range *head, *tail;
+	unsigned long start, last, size;
+	int r;
 
 	/* Align splited range start and size to granularity size, then a single
 	 * PTE will be used for whole range, this reduces the number of PTE
 	 * updated and the L1 TLB space used for translation.
 	 */
-	size = 1ULL << prange->granularity;
+	size = 1UL << prange->granularity;
 	start = ALIGN_DOWN(addr, size);
 	last = ALIGN(addr + 1, size) - 1;
-	INIT_LIST_HEAD(update_list);
-	INIT_LIST_HEAD(&parent->update_list);
 
 	pr_debug("svms 0x%p split [0x%lx 0x%lx] to [0x%lx 0x%lx] size 0x%lx\n",
 		 prange->svms, prange->start, prange->last, start, last, size);
 
 	if (start > prange->start) {
-		r = svm_range_split(prange, prange->start, start - 1, &new);
+		r = svm_range_split(prange, start, prange->last, &head);
 		if (r)
 			return r;
-
-		svm_range_add_child(parent, mm, new, SVM_OP_ADD_RANGE);
-
-		if (parent == prange) {
-			pr_debug("add to update list prange 0x%p [0x%lx 0x%lx]\n",
-				 parent, parent->start, parent->last);
-			list_add(&parent->update_list, update_list);
-		}
-	} else {
-		new = prange;
-	}
-
-	if (last >= new->last) {
-		pr_debug("entire prange 0x%p [0x%lx 0x%lx] on prange %s list\n",
-			 new, new->start, new->last,
-			 (parent == prange) ? "" : "child");
-		goto out_update;
+		svm_range_add_child(parent, mm, head, SVM_OP_ADD_RANGE);
 	}
 
-	pr_debug("split remaining last 0x%lx [0x%lx 0x%lx] from prange %s\n",
-		last, new->start, new->last, (parent == new) ? "" : "child");
-	r = svm_range_split(new, new->start, last, &tail);
-	if (r)
-		return r;
-	svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
-
-out_update:
-	/* If parent is not on update list, add it to put into deferred work */
-	if (list_empty(&parent->update_list)) {
-		pr_debug("add to update list parange 0x%p [0x%lx 0x%lx]\n",
-			 prange, parent->start, parent->last);
-		list_add(&parent->update_list, update_list);
+	if (last < prange->last) {
+		r = svm_range_split(prange, prange->start, last, &tail);
+		if (r)
+			return r;
+		svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE);
 	}
 
-	*pmigrate = new;
-
 	/* xnack on, update mapping on GPUs with ACCESS_IN_PLACE */
-	if (p->xnack_enabled && (*pmigrate)->work_item.op == SVM_OP_ADD_RANGE) {
-		(*pmigrate)->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
+	if (p->xnack_enabled && prange->work_item.op == SVM_OP_ADD_RANGE) {
+		prange->work_item.op = SVM_OP_ADD_RANGE_AND_MAP;
 		pr_debug("change prange 0x%p [0x%lx 0x%lx] op %d\n",
-			 *pmigrate, (*pmigrate)->start, (*pmigrate)->last,
+			 prange, prange->start, prange->last,
 			 SVM_OP_ADD_RANGE_AND_MAP);
 	}
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index b2ab920ab884..7fce3fccfe58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -162,9 +162,7 @@ int svm_range_vram_node_new(struct amdgpu_device *adev,
 void svm_range_vram_node_free(struct svm_range *prange);
 int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm,
 			       unsigned long addr, struct svm_range *parent,
-			       struct svm_range *prange,
-			       struct svm_range **pmigrate,
-			       struct list_head *deferred_update_list);
+			       struct svm_range *prange);
 int svm_range_restore_pages(struct amdgpu_device *adev,
 			    unsigned int pasid, uint64_t addr);
 int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence);
-- 
2.31.0
