[PATCH 3/3] drm/amdgpu: fix PRT teardown on VM fini v2

From: Christian König <christian.koenig@xxxxxxx>

v2: new approach fixing this by registering a fence callback for
    all users of the VM on teardown

Signed-off-by: Christian König <christian.koenig@amd.com>
---
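A note on the approach, kept below the --- so it stays out of the
commit message: the fix turns the PRT enable bit into a reference
count.  Every user takes a reference -- including, on teardown, every
fence still protecting the VM's page directory -- and the hardware PRT
support is only switched off when the last reference is dropped from a
fence callback.  The following stand-alone user-space model of that
pattern uses illustrative names (prt_get() etc., not the kernel API),
but mirrors the atomic_inc_return()/atomic_dec_return() logic in the
patch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int num_prt_users;

static void set_prt(bool enable)	/* models gart_funcs->set_prt() */
{
	printf("PRT %s\n", enable ? "enabled" : "disabled");
}

static void prt_get(void)		/* models amdgpu_vm_prt_get() */
{
	if (atomic_fetch_add(&num_prt_users, 1) == 0)
		set_prt(true);		/* first user enables PRT */
}

static void prt_put(void)		/* models amdgpu_vm_prt_put() */
{
	if (atomic_fetch_sub(&num_prt_users, 1) == 1)
		set_prt(false);		/* last user disables PRT */
}

static void fence_signaled(void)	/* models amdgpu_vm_prt_cb() */
{
	prt_put();
}

int main(void)
{
	prt_get();		/* teardown: one reference per live fence */
	prt_get();
	fence_signaled();	/* each signaling fence drops its reference */
	fence_signaled();	/* the last put disables PRT */
	return 0;
}

The kernel version additionally takes prt_lock around the set_prt()
call, since the atomic counter alone does not serialize concurrent
enable/disable transitions against each other.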
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 106 +++++++++++++++++++++++++--------
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h |   2 +-
 2 files changed, 82 insertions(+), 26 deletions(-)
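One more note before the diff: the subtle part of the new
amdgpu_vm_add_prt_cb() helper is that the PRT reference must be
dropped exactly once on every path -- when the callback allocation
fails (fall back to waiting synchronously), when there is no fence or
it has already signaled (run the callback inline), and in the normal
case (from the fence callback itself).  A stand-alone model of that
control flow, again with illustrative names rather than the kernel
API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fence { bool signaled; };
struct cb { struct fence *fence; };

static void prt_put(void)
{
	printf("reference dropped\n");
}

static void prt_cb(struct cb *cb)	/* models amdgpu_vm_prt_cb() */
{
	prt_put();
	free(cb);
}

/* models fence_add_callback(): nonzero means "already signaled" */
static bool fence_add_cb(struct fence *fence, struct cb *cb)
{
	return fence->signaled;	/* real code would queue cb otherwise */
}

static void add_prt_cb(struct fence *fence)	/* models the new helper */
{
	struct cb *cb = malloc(sizeof(*cb));

	if (!cb) {
		/* OOM: wait for the fence synchronously, then drop the
		 * reference directly -- this branch must not touch cb,
		 * which is NULL here.
		 */
		prt_put();
	} else {
		cb->fence = fence;
		if (!fence || fence_add_cb(fence, cb))
			prt_cb(cb);	/* no fence, or already signaled */
		/* otherwise prt_cb() runs later, when the fence signals */
	}
}

int main(void)
{
	struct fence done = { .signaled = true };

	add_prt_cb(NULL);	/* e.g. freeing a never-submitted mapping */
	add_prt_cb(&done);
	return 0;
}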

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index c11b6b6..d3437ad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1188,22 +1188,31 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
 	bool enable;
 
 	spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
-	enable = !!atomic_read(&adev->vm_manager.num_prt_mappings);
+	enable = !!atomic_read(&adev->vm_manager.num_prt_users);
 	adev->gart.gart_funcs->set_prt(adev, enable);
 	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
 }
 
 /**
+ * amdgpu_vm_prt_get - add a PRT user
+ */
+static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
+{
+	if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
+		amdgpu_vm_update_prt_state(adev);
+}
+
+/**
  * amdgpu_vm_prt_put - drop a PRT user
  */
 static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
 {
-	if (atomic_dec_return(&adev->vm_manager.num_prt_mappings) == 0)
+	if (atomic_dec_return(&adev->vm_manager.num_prt_users) == 0)
 		amdgpu_vm_update_prt_state(adev);
 }
 
 /**
- * amdgpu_vm_prt - callback for updating the PRT status
+ * amdgpu_vm_prt_cb - callback for updating the PRT status
  */
 static void amdgpu_vm_prt_cb(struct fence *fence, struct fence_cb *_cb)
 {
@@ -1214,6 +1223,29 @@ static void amdgpu_vm_prt_cb(struct fence *fence, struct fence_cb *_cb)
 }
 
 /**
+ * amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ */
+static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
+				 struct fence *fence)
+{
+	struct amdgpu_prt_cb *cb = kmalloc(sizeof(struct amdgpu_prt_cb),
+					   GFP_KERNEL);
+
+	if (!cb) {
+		/* Last resort when we are OOM */
+		if (fence)
+			fence_wait(fence, false);
+
+		amdgpu_vm_prt_put(adev);
+	} else {
+		cb->adev = adev;
+		if (!fence || fence_add_callback(fence, &cb->cb,
+						 amdgpu_vm_prt_cb))
+			amdgpu_vm_prt_cb(fence, &cb->cb);
+	}
+}
+
+/**
  * amdgpu_vm_free_mapping - free a mapping
  *
  * @adev: amdgpu_device pointer
@@ -1228,24 +1260,47 @@ static void amdgpu_vm_free_mapping(struct amdgpu_device *adev,
 				   struct amdgpu_bo_va_mapping *mapping,
 				   struct fence *fence)
 {
-	if (mapping->flags & AMDGPU_PTE_PRT) {
-		struct amdgpu_prt_cb *cb = kmalloc(sizeof(struct amdgpu_prt_cb),
-						   GFP_KERNEL);
+	if (mapping->flags & AMDGPU_PTE_PRT)
+		amdgpu_vm_add_prt_cb(adev, fence);
+	kfree(mapping);
+}
 
-		if (!cb) {
-			/* Last resort when we are OOM */
-			if (fence)
-				fence_wait(fence, false);
+/**
+ * amdgpu_vm_prt_fini - finish all PRT mappings
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
+ *
+ * Register a cleanup callback to disable PRT support after the VM dies.
+ */
+static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+	struct reservation_object *resv = vm->page_directory->tbo.resv;
+	struct fence *excl, **shared;
+	unsigned i, shared_count;
+	int r;
 
-			amdgpu_vm_prt_put(cb->adev);
-		} else {
-			cb->adev = adev;
-			if (!fence || fence_add_callback(fence, &cb->cb,
-							 amdgpu_vm_prt_cb))
-				amdgpu_vm_prt_cb(fence, &cb->cb);
-		}
+	r = reservation_object_get_fences_rcu(resv, &excl,
+					      &shared_count, &shared);
+	if (r) {
+	/* Not enough memory to grab the fence list; as a last resort,
+	 * block until all the fences complete.
+		 */
+		reservation_object_wait_timeout_rcu(resv, true, false,
+						    MAX_SCHEDULE_TIMEOUT);
+		return;
 	}
-	kfree(mapping);
+
+	/* Add a callback for each fence in the reservation object */
+	amdgpu_vm_prt_get(adev);
+	amdgpu_vm_add_prt_cb(adev, excl);
+
+	for (i = 0; i < shared_count; ++i) {
+		amdgpu_vm_prt_get(adev);
+		amdgpu_vm_add_prt_cb(adev, shared[i]);
+	}
+
+	kfree(shared);
 }
 
 /**
@@ -1395,8 +1450,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		if (!adev->gart.gart_funcs->set_prt)
 			return -EINVAL;
 
-		if (atomic_inc_return(&adev->vm_manager.num_prt_mappings) == 1)
-			amdgpu_vm_update_prt_state(adev);
+		amdgpu_vm_prt_get(adev);
 	}
 
 	/* make sure object fit at this offset */
@@ -1699,6 +1753,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
+	bool prt_fini_called = false;
 	int i;
 
 	amd_sched_entity_fini(vm->entity.sched, &vm->entity);
@@ -1712,13 +1767,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		kfree(mapping);
 	}
 	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
-		if (mapping->flags & AMDGPU_PTE_PRT)
-			continue;
+		if ((mapping->flags & AMDGPU_PTE_PRT) && !prt_fini_called) {
+			amdgpu_vm_prt_fini(adev, vm);
+			prt_fini_called = true;
+		}
 
 		list_del(&mapping->list);
-		kfree(mapping);
+		amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
 	}
-	amdgpu_vm_clear_freed(adev, vm);
 
 	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) {
 		struct amdgpu_bo *pt = vm->page_tables[i].bo;
@@ -1764,7 +1820,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
-	atomic_set(&adev->vm_manager.num_prt_mappings, 0);
+	atomic_set(&adev->vm_manager.num_prt_users, 0);
 }
 
 /**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 51fa12f..7fccd486 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -164,7 +164,7 @@ struct amdgpu_vm_manager {
 
 	/* partial resident texture handling */
 	spinlock_t				prt_lock;
-	atomic_t				num_prt_mappings;
+	atomic_t				num_prt_users;
 };
 
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
-- 
2.5.0


