+ }
+}
+
void svm_range_set_max_pages(struct amdgpu_device *adev)
{
uint64_t max_pages;
@@ -2057,20 +2077,19 @@ svm_range_split_new(struct svm_range_list *svms, uint64_t start, uint64_t last,
* @attrs: array of attributes
* @update_list: output, the ranges need validate and update GPU mapping
* @insert_list: output, the ranges need insert to svms
- * @remove_list: output, the ranges are replaced and need remove from svms
* @remap_list: output, remap unaligned svm ranges
*
* Check if the virtual address range has overlap with any existing ranges,
* split partly overlapping ranges and add new ranges in the gaps. All changes
* should be applied to the range_list and interval tree transactionally. If
* any range split or allocation fails, the entire update fails. Therefore any
- * existing overlapping svm_ranges are cloned and the original svm_ranges left
+ * existing overlapping svm_ranges are duplicated and the original svm_ranges left
* unchanged.
*
- * If the transaction succeeds, the caller can update and insert clones and
- * new ranges, then free the originals.
+ * If the transaction succeeds, the caller can update and insert split ranges and
+ * new ranges.
*
- * Otherwise the caller can free the clones and new ranges, while the old
+ * Otherwise the caller can free the duplicated and new ranges, while the old
* svm_ranges remain unchanged.
*
* Context: Process context, caller must hold svms->lock
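The comment above describes a copy-on-write style transaction: all changes are staged on side lists and only published once every allocation and split has succeeded. As a reading aid, here is a minimal sketch of that control-flow shape; build_changes(), discard_changes() and commit_changes() are hypothetical placeholders, not helpers from this patch:

	LIST_HEAD(staged);		/* side list; originals stay untouched */

	r = build_changes(&staged);	/* duplicate/split, may fail with -ENOMEM */
	if (r) {
		discard_changes(&staged);	/* free duplicates; originals are intact */
		return r;
	}
	commit_changes(&staged);	/* only now publish to svms->list and the trees */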
@@ -2082,7 +2101,7 @@ static int
svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
struct list_head *update_list, struct list_head *insert_list,
- struct list_head *remove_list, struct list_head *remap_list)
+ struct list_head *remap_list)
{
unsigned long last = start + size - 1UL;
struct svm_range_list *svms = &p->svms;
@@ -2090,13 +2109,14 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
struct svm_range *prange;
struct svm_range *tmp;
struct list_head new_list;
+ struct list_head modify_list;
int r = 0;
pr_debug("svms 0x%p [0x%llx 0x%lx]\n", &p->svms, start, last);
INIT_LIST_HEAD(update_list);
INIT_LIST_HEAD(insert_list);
- INIT_LIST_HEAD(remove_list);
+ INIT_LIST_HEAD(&modify_list);
INIT_LIST_HEAD(&new_list);
INIT_LIST_HEAD(remap_list);
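The new modify_list is a stack-local staging area: a prange is parked on it while its duplicate is split, and is only moved on once the outcome is known. This relies on the kernel's intrusive list_head, where moving an element between lists merely relinks its embedded node, with no allocation involved. A small illustration under kernel-context assumptions (struct foo is made up for the example; error handling elided):

	struct foo {
		int v;
		struct list_head list;	/* embedded node: one list membership */
	};

	LIST_HEAD(src);
	LIST_HEAD(staging);
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	list_add(&f->list, &src);		/* f lives on src */
	list_move_tail(&f->list, &staging);	/* unlinked from src, relinked to staging */
	list_move_tail(&f->list, &src);		/* and back; f itself never moves in memory */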
@@ -2117,35 +2137,41 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
/* nothing to do */
} else if (node->start < start || node->last > last) {
/* node intersects the update range and its attributes
- * will change. Clone and split it, apply updates only
+ * will change. Duplicate and split it, apply updates only
* to the overlapping part
*/
- struct svm_range *old = prange;
+ /* prange_dup is a temporary prange that holds the size and addr info
+ * updates of prange after the split
+ */
+ struct svm_range *prange_dup;
- prange = svm_range_clone(old);
- if (!prange) {
+ prange_dup = svm_range_duplicate(prange);
+ if (!prange_dup) {
r = -ENOMEM;
goto out;
}
- list_add(&old->update_list, remove_list);
- list_add(&prange->list, insert_list);
- list_add(&prange->update_list, update_list);
-
+ /* split prange_dup */
if (node->start < start) {
pr_debug("change old range start\n");
- r = svm_range_split_head(prange, start,
+ r = svm_range_split_head(prange_dup, start,
insert_list, remap_list);
if (r)
goto out;
}
if (node->last > last) {
pr_debug("change old range last\n");
- r = svm_range_split_tail(prange, last,
+ r = svm_range_split_tail(prange_dup, last,
insert_list, remap_list);
if (r)
goto out;
}
+
+ /* split succeeded, insert_list now has the new head/tail pranges */
+ /* move prange from svm list to modify list */
+ list_move_tail(&prange->list, &modify_list);
+ /* put prange_dup on prange->update_list */
+ list_add(&prange_dup->list, &prange->update_list);
} else {
/* The node is contained within start..last,
* just update it
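Note the idiom in the last two added lines of this hunk: while prange is parked on modify_list, its update_list member is not serving its usual role, so it doubles as a one-slot anchor that ties prange_dup back to its original; the duplicate is later recovered from that anchor. Condensed, with names from the patch (flow abbreviated):

	/* stash: anchor the duplicate on the parked original */
	list_add(&prange_dup->list, &prange->update_list);

	/* ... later, on either outcome path ... */
	prange_dup = list_first_entry_or_null(&prange->update_list,
					      struct svm_range, list);
	if (prange_dup) {
		/* consume the duplicate */
	}
	INIT_LIST_HEAD(&prange->update_list);	/* reset the anchor before reuse */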
@@ -2178,8 +2204,38 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
svm_range_free(prange, false);
list_for_each_entry_safe(prange, tmp, &new_list, list)
svm_range_free(prange, true);
+
+ list_for_each_entry_safe(prange, tmp, &modify_list, list) {
+ struct svm_range *prange_dup;
+
+ /* free the prange_dup that is associated with prange on modify_list */
+ prange_dup = list_first_entry_or_null(&prange->update_list,
+ struct svm_range, list);
+ if (prange_dup)
+ svm_range_free(prange_dup, false);
+
+ INIT_LIST_HEAD(&prange->update_list);
+ /* put prange back on the svms list */
+ list_move_tail(&prange->list, &svms->list);
+ }
} else {
list_splice(&new_list, insert_list);
+
+ list_for_each_entry_safe(prange, tmp, &modify_list, list) {
+ struct svm_range *prange_dup;
+
+ prange_dup = list_first_entry_or_null(&prange->update_list,
+ struct svm_range, list);
+ if (prange_dup) {
+ /* update prange from prange_dup */
+ svm_range_copy(prange, prange_dup);
+ /* release the temporary prange_dup */
+ svm_range_free(prange_dup, false);
+ }
+ INIT_LIST_HEAD(&prange->update_list);
+
+ /* move prange from modify_list to insert_list and update_list */
+ list_move_tail(&prange->list, insert_list);
+ list_add(&prange->update_list, update_list);
+ }
}
return r;
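On the success path, svm_range_copy() folds the post-split geometry of prange_dup back into the original prange before the duplicate is freed. That helper is not shown in this excerpt; what follows is a hedged sketch of the minimum it must do, using the start/last/npages fields of struct svm_range and the inclusive-last convention visible above (last = start + size - 1). The name svm_range_copy_geom is an illustrative stand-in, not the patch's function:

	/* illustrative only; the real svm_range_copy() may carry more state */
	static void svm_range_copy_geom(struct svm_range *dst,
					const struct svm_range *src)
	{
		dst->start  = src->start;	/* first page of the range */
		dst->last   = src->last;	/* last page, inclusive */
		dst->npages = src->last - src->start + 1;
	}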
@@ -3533,7 +3589,6 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
struct amdkfd_process_info *process_info = p->kgd_process_info;
struct list_head update_list;
struct list_head insert_list;
- struct list_head remove_list;
struct list_head remap_list;
struct svm_range_list *svms;
struct svm_range *prange;
@@ -3566,7 +3621,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
/* Add new range and split existing ranges as needed */
r = svm_range_add(p, start, size, nattr, attrs, &update_list,
- &insert_list, &remove_list, &remap_list);
+ &insert_list, &remap_list);
if (r) {
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
@@ -3574,21 +3629,18 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
}
/* Apply changes as a transaction */
list_for_each_entry_safe(prange, next, &insert_list, list) {
- svm_range_add_to_svms(prange);
- svm_range_add_notifier_locked(mm, prange);
+ /* prange can be a new or an existing range, put it on svms->list */
+ list_move_tail(&prange->list, &prange->svms->list);
+ /* update prange in the interval trees: svms->objects and the
+ * mm interval notifier tree
+ */
+ svm_range_update_notifier_and_interval_tree(mm, prange);
}
list_for_each_entry(prange, &update_list, update_list) {
svm_range_apply_attrs(p, prange, nattr, attrs,
&update_mapping);
/* TODO: unmap ranges from GPU that lost access */
}
- list_for_each_entry_safe(prange, next, &remove_list, update_list) {
- pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
- prange->svms, prange, prange->start,
- prange->last);
- svm_range_unlink(prange);
- svm_range_remove_notifier(prange);
- svm_range_free(prange, false);
- }
mmap_write_downgrade(mm);
/* Trigger migrations and revalidate and map to GPUs as needed. If