+ return 0;
+}
+
+/**
+ * amdgpu_vm_pasid_remove - drop a pasid -> VM mapping from the pasid IDR
+ * @adev: amdgpu device owning vm_manager.pasid_idr
+ * @pasid: pasid whose IDR entry is removed; 0 means "no pasid", making
+ *         this a no-op
+ * @vm_pasid: optional pointer to the VM's cached pasid field; cleared to
+ *            0 when non-NULL
+ *
+ * The IDR entry is removed under vm_manager.pasid_lock with interrupts
+ * disabled (irqsave), matching every other pasid_lock user in this patch.
+ * Note this only unmaps the pasid from the VM; it does NOT free the pasid
+ * itself -- callers that own the pasid (see amdgpu_vm_make_compute) must
+ * call amdgpu_pasid_free() separately.
+ */
+static void amdgpu_vm_pasid_remove(struct amdgpu_device *adev,
+ unsigned int pasid,
+ unsigned int *vm_pasid)
+{
+ unsigned long flags;
+
+ if (!pasid)
+ return;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ idr_remove(&adev->vm_manager.pasid_idr, pasid);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+
+ if (vm_pasid)
+ *vm_pasid = 0;
+}
+
/*
* vm eviction_lock can be taken in MMU notifiers. Make sure no
reclaim-FS
* happens while holding this lock anywhere to prevent deadlocks when
@@ -2940,18 +2980,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 	amdgpu_bo_unreserve(vm->root.bo);
- if (pasid) {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-			      GFP_ATOMIC);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
- if (r < 0)
- goto error_free_root;
-
- vm->pasid = pasid;
- }
+ if (amdgpu_vm_pasid_alloc(adev, vm, pasid, &vm->pasid))
+ goto error_free_root;
INIT_KFIFO(vm->faults);
@@ -3038,19 +3068,11 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
r = amdgpu_vm_check_clean_reserved(adev, vm);
if (r)
goto unreserve_bo;
+ r = amdgpu_vm_pasid_alloc(adev, vm, pasid, NULL);
+ if (r == -ENOSPC)
+ goto unreserve_bo;
- if (pasid) {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
-		r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
-			      GFP_ATOMIC);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-
- if (r == -ENOSPC)
- goto unreserve_bo;
- r = 0;
- }
+ r = 0;
/* Check if PD needs to be reinitialized and do it before
* changing any other state, in case it fails.
@@ -3089,35 +3111,23 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->is_compute_context = true;
if (vm->pasid) {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
-
/* Free the original amdgpu allocated pasid
* Will be replaced with kfd allocated pasid
*/
amdgpu_pasid_free(vm->pasid);
- vm->pasid = 0;
+ amdgpu_vm_pasid_remove(adev, vm->pasid, &vm->pasid);
}
/* Free the shadow bo for compute VM */
amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.bo)->shadow);
-
if (pasid)
vm->pasid = pasid;
goto unreserve_bo;
free_idr:
- if (pasid) {
- unsigned long flags;
+ amdgpu_vm_pasid_remove(adev, pasid, NULL);
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- idr_remove(&adev->vm_manager.pasid_idr, pasid);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
- }
unreserve_bo:
amdgpu_bo_unreserve(vm->root.bo);
return r;
@@ -3133,14 +3143,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
*/
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
- if (vm->pasid) {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
- }
- vm->pasid = 0;
+ amdgpu_vm_pasid_remove(adev, vm->pasid, &vm->pasid);
vm->is_compute_context = false;
}
@@ -3164,15 +3167,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
root = amdgpu_bo_ref(vm->root.bo);
amdgpu_bo_reserve(root, true);
- if (vm->pasid) {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
- idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
- spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
- vm->pasid = 0;
- }
-
+ amdgpu_vm_pasid_remove(adev, vm->pasid, &vm->pasid);
dma_fence_wait(vm->last_unlocked, false);
dma_fence_put(vm->last_unlocked);
--
2.32.0