From: Philip Yang <Philip.Yang@xxxxxxx>

When an application explicitly calls unmap, or when ranges are unmapped
from mmput as the application exits, the driver receives an
MMU_NOTIFY_UNMAP event. Remove the svm range from the process svms
object tree and list first; unmapping from the GPUs is added in the
following patch. Split svm ranges to handle unmapping of a partial svm
range.

Signed-off-by: Philip Yang <Philip.Yang@xxxxxxx>
Signed-off-by: Alex Sierra <alex.sierra@xxxxxxx>
Signed-off-by: Felix Kuehling <Felix.Kuehling@xxxxxxx>
---
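Note on the splitting: svm_range_handle_overlap() computes which pieces
of an existing svm range survive when an unmap covers only part of it.
The snippet below is not from this patch; it is a minimal userspace
sketch of that interval arithmetic only, and struct piece and
split_on_unmap() are made-up names for the illustration. It assumes the
unmapped range actually overlaps the existing range.

#include <stdio.h>

struct piece {
	unsigned long start, last;	/* inclusive page numbers */
};

/*
 * Split an existing range [old_start, old_last] against an unmap of
 * [start, last] that overlaps it. The overlapped middle goes away;
 * up to two surviving pieces (head and/or tail) are returned.
 */
static int split_on_unmap(unsigned long old_start, unsigned long old_last,
			  unsigned long start, unsigned long last,
			  struct piece out[2])
{
	int n = 0;

	if (old_start < start)	/* head survives below the unmapped part */
		out[n++] = (struct piece){ old_start, start - 1 };
	if (old_last > last)	/* tail survives above the unmapped part */
		out[n++] = (struct piece){ last + 1, old_last };
	return n;
}

int main(void)
{
	/* range [0x100, 0x1ff] with [0x140, 0x17f] unmapped in the middle */
	struct piece out[2];
	int i, n = split_on_unmap(0x100, 0x1ff, 0x140, 0x17f, out);

	for (i = 0; i < n; i++)
		printf("keep [0x%lx 0x%lx]\n", out[i].start, out[i].last);
	return 0;
}

In the patch itself the surviving pieces end up on insert_list and are
re-added to the svms interval tree, while the pieces covered by the
unmap go to free_list for deferred freeing under SRCU.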
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 86 ++++++++++++++++++++++++++++
 1 file changed, 86 insertions(+)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index ad007261f54c..55500ec4972f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -699,15 +699,101 @@ static void svm_range_srcu_free_work(struct work_struct *work_struct)
 	mutex_unlock(&svms->free_list_lock);
 }
 
+static void
+svm_range_unmap_from_cpu(struct mm_struct *mm, unsigned long start,
+			 unsigned long last)
+{
+	struct list_head remove_list;
+	struct list_head update_list;
+	struct list_head insert_list;
+	struct svm_range_list *svms;
+	struct svm_range new = {0};
+	struct svm_range *prange;
+	struct svm_range *tmp;
+	struct kfd_process *p;
+	int r;
+
+	p = kfd_lookup_process_by_mm(mm);
+	if (!p)
+		return;
+	svms = &p->svms;
+
+	pr_debug("notifier svms 0x%p [0x%lx 0x%lx]\n", svms, start, last);
+
+	svms_lock(svms);
+
+	r = svm_range_handle_overlap(svms, &new, start, last, &update_list,
+				     &insert_list, &remove_list, NULL);
+	if (r) {
+		svms_unlock(svms);
+		kfd_unref_process(p);
+		return;
+	}
+
+	mutex_lock(&svms->free_list_lock);
+	list_for_each_entry_safe(prange, tmp, &remove_list, remove_list) {
+		pr_debug("remove svms 0x%p [0x%lx 0x%lx]\n", prange->svms,
+			 prange->it_node.start, prange->it_node.last);
+		svm_range_unlink(prange);
+
+		pr_debug("schedule to free svms 0x%p [0x%lx 0x%lx]\n",
+			 prange->svms, prange->it_node.start,
+			 prange->it_node.last);
+		list_add_tail(&prange->remove_list, &svms->free_list);
+	}
+	if (!list_empty(&svms->free_list))
+		schedule_work(&svms->srcu_free_work);
+	mutex_unlock(&svms->free_list_lock);
+
+	/* prange in update_list is unmapping from cpu, remove it from insert
+	 * list
+	 */
+	list_for_each_entry_safe(prange, tmp, &update_list, update_list) {
+		list_del(&prange->list);
+		mutex_lock(&svms->free_list_lock);
+		list_add_tail(&prange->remove_list, &svms->free_list);
+		mutex_unlock(&svms->free_list_lock);
+	}
+	mutex_lock(&svms->free_list_lock);
+	if (!list_empty(&svms->free_list))
+		schedule_work(&svms->srcu_free_work);
+	mutex_unlock(&svms->free_list_lock);
+
+	list_for_each_entry_safe(prange, tmp, &insert_list, list)
+		svm_range_add_to_svms(prange);
+
+	svms_unlock(svms);
+	kfd_unref_process(p);
+}
+
 /**
  * svm_range_cpu_invalidate_pagetables - interval notifier callback
  *
+ * MMU range unmap notifier to remove svm ranges
  */
 static bool
 svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
 				    const struct mmu_notifier_range *range,
 				    unsigned long cur_seq)
 {
+	unsigned long start = range->start >> PAGE_SHIFT;
+	unsigned long last = (range->end - 1) >> PAGE_SHIFT;
+	struct svm_range_list *svms;
+
+	svms = container_of(mni, struct svm_range_list, notifier);
+
+	if (range->event == MMU_NOTIFY_RELEASE) {
+		pr_debug("cpu release range [0x%lx 0x%lx]\n", range->start,
+			 range->end - 1);
+		return true;
+	}
+	if (range->event == MMU_NOTIFY_UNMAP) {
+		pr_debug("mm 0x%p unmap range [0x%lx 0x%lx]\n", range->mm,
+			 start, last);
+		svm_range_unmap_from_cpu(mni->mm, start, last);
+		return true;
+	}
+
 	return true;
 }
 
-- 
2.29.2