Re: VM lockdep warning

On 21.04.2012 17:57, Dave Airlie wrote:
> 2012/4/21 Jerome Glisse <j.glisse@xxxxxxxxx>:
>> 2012/4/21 Christian König <deathsimple@xxxxxxxxxxx>:
>>> On 21.04.2012 16:08, Jerome Glisse wrote:
>>>> 2012/4/21 Christian König <deathsimple@xxxxxxxxxxx>:
>>>>> Interesting, I'm pretty sure that I haven't touched the locking
>>>>> order of the cs_mutex vs. vm_mutex.
>>>>>
>>>>> Maybe it is just some kind of side effect; going to look into it
>>>>> anyway.
>>>>>
>>>>> Christian.

>>>> It's the usage: the init path takes the locks in a different order
>>>> than the cs path does.
>>> Well, could you explain to me why the vm code takes the cs mutex in
>>> the first place?
>>>
>>> It clearly has its own mutex, and it doesn't look like it deals with
>>> any cs related data anyway.
>>>
>>> Christian.
>> Lock simplification is on my todo list. The issue is that the vm
>> manager is protected by the cs_mutex. The vm.mutex is specific to each
>> vm; it doesn't protect the global vm management. I didn't want to
>> introduce a new global vm mutex, since vm activity is mostly triggered
>> on behalf of cs, so I decided to use the cs mutex.
>>
>> That's why the non-cs paths of the vm code need to take the cs mutex.
> So if one app is adding a bo, and another doing CS, isn't deadlock a
> real possibility?
Yeah, I think so.
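
To make the inversion explicit: the cs path takes the cs_mutex and then
the per-vm mutex, while the bo_add path takes them the other way around.
Here is a minimal standalone sketch of that ABBA pattern (hypothetical
userspace pthread code that only mirrors the two lock orders, not the
actual driver paths; build with "cc -pthread abba.c"):

    /* abba.c - two threads taking two locks in opposite order */
    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t cs_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void *cs_path(void *arg)            /* mimics the CS ioctl */
    {
        pthread_mutex_lock(&cs_mutex);         /* cs first ...        */
        usleep(1000);                          /* widen race window   */
        pthread_mutex_lock(&vm_mutex);         /* ... then vm         */
        pthread_mutex_unlock(&vm_mutex);
        pthread_mutex_unlock(&cs_mutex);
        return arg;
    }

    static void *bo_add_path(void *arg)        /* mimics vm_bo_add    */
    {
        pthread_mutex_lock(&vm_mutex);         /* vm first ...        */
        usleep(1000);
        pthread_mutex_lock(&cs_mutex);         /* ... then cs: ABBA   */
        pthread_mutex_unlock(&cs_mutex);
        pthread_mutex_unlock(&vm_mutex);
        return arg;
    }

    int main(void)
    {
        pthread_t a, b;

        pthread_create(&a, NULL, cs_path, NULL);
        pthread_create(&b, NULL, bo_add_path, NULL);
        pthread_join(a, NULL);  /* once both sleeps hit, this hangs */
        pthread_join(b, NULL);
        printf("got lucky, no deadlock this run\n");
        return 0;
    }

Lockdep complains about exactly this: the same two lock classes acquired
in both orders.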
> I expect the VM code needs to take the CS mutex earlier then.

I would strongly suggest giving the vm code its own global mutex and removing the per-vm mutex, since the latter is pretty superfluous when the cs_mutex is taken most of the time anyway.

The attached patch is against drm-fixes and does exactly that.
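
The idea is that every vm path, cs and non-cs alike, then serializes on
the single rdev->vm_mutex, so there is only one lock order left to get
right (a trivial sketch of the intended rule, assuming all callers are
converted to the new lock):

    mutex_lock(&rdev->vm_mutex);    /* one global lock for all vm state */
    /* bind/unbind vms, walk the va lists, update page tables ...       */
    mutex_unlock(&rdev->vm_mutex);  /* no second vm lock -> no ABBA     */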

Christian.
From b6a79c2e54f8200e770c25e930b0784343105a2b Mon Sep 17 00:00:00 2001
From: Christian König <deathsimple@xxxxxxxxxxx>
Date: Sat, 21 Apr 2012 18:29:34 +0200
Subject: [PATCH] drm/radeon: use a global mutex instead of per vm one
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Resolving deadlock problems with the cs_mutex.

Signed-off-by: Christian König <deathsimple@xxxxxxxxxxx>
---
 drivers/gpu/drm/radeon/radeon.h        |    2 +-
 drivers/gpu/drm/radeon/radeon_device.c |    1 +
 drivers/gpu/drm/radeon/radeon_gart.c   |   25 +++++++++----------------
 3 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 138b952..f35957d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -680,7 +680,6 @@ struct radeon_vm {
 	u64				pt_gpu_addr;
 	u64				*pt;
 	struct radeon_sa_bo		sa_bo;
-	struct mutex			mutex;
 	/* last fence for cs using this vm */
 	struct radeon_fence		*fence;
 };
@@ -1527,6 +1526,7 @@ struct radeon_device {
 	struct radeon_pm		pm;
 	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
 	struct radeon_mutex		cs_mutex;
+	struct mutex			vm_mutex;
 	struct radeon_wb		wb;
 	struct radeon_dummy_page	dummy_page;
 	bool				gpu_lockup;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index ea7df16..cecb785 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -725,6 +725,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	 * can recall function without having locking issues */
 	radeon_mutex_init(&rdev->cs_mutex);
 	radeon_mutex_init(&rdev->ib_pool.mutex);
+	mutex_init(&rdev->vm_mutex);
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		mutex_init(&rdev->ring[i].mutex);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index c58a036..1b4933b 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -356,13 +356,13 @@ int radeon_vm_manager_suspend(struct radeon_device *rdev)
 {
 	struct radeon_vm *vm, *tmp;
 
-	radeon_mutex_lock(&rdev->cs_mutex);
+	mutex_lock(&rdev->vm_mutex);
 	/* unbind all active vm */
 	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
 		radeon_vm_unbind_locked(rdev, vm);
 	}
 	rdev->vm_manager.funcs->fini(rdev);
-	radeon_mutex_unlock(&rdev->cs_mutex);
+	mutex_unlock(&rdev->vm_mutex);
 	return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
 }
 
@@ -476,13 +476,11 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 		return -EINVAL;
 	}
 
-	mutex_lock(&vm->mutex);
+	mutex_lock(&rdev->vm_mutex);
 	if (last_pfn > vm->last_pfn) {
 		/* grow va space 32M by 32M */
 		unsigned align = ((32 << 20) >> 12) - 1;
-		radeon_mutex_lock(&rdev->cs_mutex);
 		radeon_vm_unbind_locked(rdev, vm);
-		radeon_mutex_unlock(&rdev->cs_mutex);
 		vm->last_pfn = (last_pfn + align) & ~align;
 	}
 	head = &vm->va;
@@ -498,7 +496,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 				bo, (unsigned)bo_va->soffset, tmp->bo,
 				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
 			kfree(bo_va);
-			mutex_unlock(&vm->mutex);
+			mutex_unlock(&rdev->vm_mutex);
 			return -EINVAL;
 		}
 		last_offset = tmp->eoffset;
@@ -506,7 +504,7 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
 	}
 	list_add(&bo_va->vm_list, head);
 	list_add_tail(&bo_va->bo_list, &bo->va);
-	mutex_unlock(&vm->mutex);
+	mutex_unlock(&rdev->vm_mutex);
 	return 0;
 }
 
@@ -597,13 +595,11 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
 	if (bo_va == NULL)
 		return 0;
 
-	mutex_lock(&vm->mutex);
-	radeon_mutex_lock(&rdev->cs_mutex);
+	mutex_lock(&rdev->vm_mutex);
 	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
-	radeon_mutex_unlock(&rdev->cs_mutex);
 	list_del(&bo_va->vm_list);
-	mutex_unlock(&vm->mutex);
 	list_del(&bo_va->bo_list);
+	mutex_unlock(&rdev->vm_mutex);
 
 	kfree(bo_va);
 	return 0;
@@ -643,11 +639,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 	struct radeon_bo_va *bo_va, *tmp;
 	int r;
 
-	mutex_lock(&vm->mutex);
-
-	radeon_mutex_lock(&rdev->cs_mutex);
+	mutex_lock(&rdev->vm_mutex);
 	radeon_vm_unbind_locked(rdev, vm);
-	radeon_mutex_unlock(&rdev->cs_mutex);
 
 	/* remove all bo */
 	r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
@@ -670,5 +663,5 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
 			kfree(bo_va);
 		}
 	}
-	mutex_unlock(&vm->mutex);
+	mutex_unlock(&rdev->vm_mutex);
 }
-- 
1.7.5.4
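
As a possible follow-up (not part of the patch above), the helpers that
now expect the caller to hold the new lock, like radeon_vm_unbind_locked()
and radeon_vm_bo_update_pte(), could document that expectation with a
lockdep assertion, e.g.:

    /* at the top of radeon_vm_unbind_locked() in radeon_gart.c */
    lockdep_assert_held(&rdev->vm_mutex);

That way lockdep would immediately flag any path that still calls them
without rdev->vm_mutex held.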

_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/dri-devel
