On 2021-06-11 12:51 a.m., Alex Sierra wrote:
[Philip (review)]: should take svms lock

[Why]
Avoid duplicated memory allocation for address ranges that have been
already allocated by either ioctl_alloc_memory_of_gpu or SVM mechanisms
first.

[How]
For SVM-first allocations: check that the address range passed into the
ioctl memory alloc does not already exist in the kfd_process
svms->objects interval tree.
For ioctl_alloc_memory_of_gpu-first allocations: look for the address
range in the VA interval tree of the VM inside each pdd used in a
kfd_process.

Signed-off-by: Alex Sierra <alex.sierra@xxxxxxx>
---
 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 11 +++++
 drivers/gpu/drm/amd/amdkfd/kfd_svm.c     | 63 ++++++++++++++++++------
 2 files changed, 59 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 5788a4656fa1..0cfa685d9b8a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1259,6 +1259,17 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 	if (args->size == 0)
 		return -EINVAL;
 
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+	struct svm_range_list *svms = &p->svms;
+
[Philip (review)]: take vm root page table bo lock

+	if (interval_tree_iter_first(&svms->objects,
+				     args->va_addr >> PAGE_SHIFT,
+				     (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
+		pr_info("Address: 0x%llx already allocated by SVM\n",
+			args->va_addr);
+		return -EADDRINUSE;
+	}
+#endif
 	dev = kfd_device_by_id(args->gpu_id);
 	if (!dev)
 		return -EINVAL;

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 0f18bd0dc64e..883a9659cf8e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2485,9 +2485,40 @@ int svm_range_list_init(struct kfd_process *p)
 	return 0;
 }
 
+/**
+ * svm_range_is_vm_bo_mapped - check if virtual address range mapped already
+ * @p: current kfd_process
+ * @start: range start address, in pages
+ * @last: range last address, in pages
+ *
+ * The purpose is to avoid virtual address ranges already allocated by
+ * traditional kfd_ioctl_alloc_memory_of_gpu ioctl.
+ * It looks for each pdd in the kfd_process.
+ *
+ * Context: Process context
+ *
+ * Return true only if range has been mapped
+ */
+static bool
+svm_range_is_vm_bo_mapped(struct kfd_process *p, uint64_t start, uint64_t last)
+{
+	uint32_t i;
+
+	for (i = 0; i < p->n_pdds; i++) {
+		struct amdgpu_vm *vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
+
[Philip (review)]: svm_range_is_vm_bo_mapped returns bool, this should return bool, or change

+		if (vm && interval_tree_iter_first(&vm->va, start, last)) {
+			pr_debug("Range [0x%llx 0x%llx] already mapped\n", start, last);
+			return true;
+		}
+	}
+
+	return false;
+}
+
 /**
  * svm_range_is_valid - check if virtual address range is valid
- * @mm: current process mm_struct
+ * @mm: current kfd_process
  * @start: range start address, in pages
  * @size: range size, in pages
  *
@@ -2496,28 +2527,28 @@ int svm_range_list_init(struct kfd_process *p)
  * Context: Process context
  *
  * Return:
- * true - valid svm range
- * false - invalid svm range
+ * 0 - OK, otherwise error code
  */
-static bool
-svm_range_is_valid(struct mm_struct *mm, uint64_t start, uint64_t size)
+static int
+svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
[Philip (review), cont.]: ... svm_range_is_vm_bo_mapped to return int error code.

Regards,
Philip
 {
 	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
 	struct vm_area_struct *vma;
 	unsigned long end;
+	unsigned long start_unchg = start;
 
 	start <<= PAGE_SHIFT;
 	end = start + (size << PAGE_SHIFT);
 
 	do {
-		vma = find_vma(mm, start);
+		vma = find_vma(p->mm, start);
 		if (!vma || start < vma->vm_start ||
 		    (vma->vm_flags & device_vma))
-			return false;
+			return -EFAULT;
 		start = min(end, vma->vm_end);
 	} while (start < end);
 
-	return true;
+	return svm_range_is_vm_bo_mapped(p, start_unchg, (end - 1) >> PAGE_SHIFT) ?
+	       -EADDRINUSE : 0;
 }
 
 /**
@@ -2826,9 +2857,9 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
 
 	svm_range_list_lock_and_flush_work(svms, mm);
 
-	if (!svm_range_is_valid(mm, start, size)) {
-		pr_debug("invalid range\n");
-		r = -EFAULT;
+	r = svm_range_is_valid(p, start, size);
+	if (r) {
+		pr_debug("invalid range r=%d\n", r);
 		mmap_write_unlock(mm);
 		goto out;
 	}
@@ -2929,15 +2960,17 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
 	uint32_t flags = 0xffffffff;
 	int gpuidx;
 	uint32_t i;
+	int r = 0;
 
 	pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
 		 start + size - 1, nattr);
 
 	mmap_read_lock(mm);
-	if (!svm_range_is_valid(mm, start, size)) {
-		pr_debug("invalid range\n");
+	r = svm_range_is_valid(p, start, size);
+	if (r) {
+		pr_debug("invalid range r=%d\n", r);
 		mmap_read_unlock(mm);
-		return -EINVAL;
+		return r;
 	}
 	mmap_read_unlock(mm);
_______________________________________________
amd-gfx mailing list
amd-gfx@xxxxxxxxxxxxxxxxxxxxx
https://lists.freedesktop.org/mailman/listinfo/amd-gfx