[PATCH 09/10] drm/ttm: remove lru_lock around ttm_bo_reserve

Nothing should assume any longer that reserve will always succeed
with the lru lock held, so we can safely decouple reservation from
the lru lock. As a bonus this fixes most of the lockdep annotations
for reservations.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@xxxxxxxxxxxxx>
---
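A note for reviewers (illustration only, not part of the patch): with this
change the reservation itself is taken without the lru lock, and the lock is
only acquired afterwards for the list manipulation. The resulting order,
mirroring the ttm_bo_reserve() hunk below:

	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait,
				   use_sequence, sequence);
	if (likely(ret == 0)) {
		/* lru_lock now only protects the list manipulation */
		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);
		spin_unlock(&glob->lru_lock);
		ttm_bo_list_ref_sub(bo, put_count, true);
	}

Since ttm_bo_reserve_nolru() may sleep in ttm_bo_wait_unreserved(), calling
it without the spinlock held also removes the old drop-and-retake dance
around the wait.
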
 drivers/gpu/drm/ttm/ttm_bo.c           | 50 ++++++++++++++++++++++------------
 drivers/gpu/drm/ttm/ttm_execbuf_util.c |  2 +-
 include/drm/ttm/ttm_bo_driver.h        | 17 ++----------
 3 files changed, 37 insertions(+), 32 deletions(-)
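
On the atomic_cmpxchg() -> atomic_xchg() change: the loop only ever stores 1,
so the conditional compare buys nothing. Writing 1 over an existing 1 is
harmless, and the returned old value alone tells us whether we took the
reservation. Roughly (wait_or_bail() is a stand-in for the deadlock-avoidance
and wait logic, not a real function):

	/* before: store 1 only if the word was 0 */
	while (atomic_cmpxchg(&bo->reserved, 0, 1) != 0)
		wait_or_bail();

	/* after: always store 1; a returned 0 still means we won,
	 * and a 1 -> 1 write is a no-op for everyone else */
	while (atomic_xchg(&bo->reserved, 1) != 0)
		wait_or_bail();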

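The ttm_bo_delayed_delete() hunk turns into a two-step reserve because a
blocking reserve may no longer sleep under the lru lock. A sketch of the new
pattern, matching the hunk below:

	/* opportunistic try first, lru_lock still held */
	ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
	if (remove_all && ret) {
		/* on full teardown we must succeed, so drop the lock
		 * before blocking on the reservation */
		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_reserve_nolru(entry, false, false,
					   false, 0);
		spin_lock(&glob->lru_lock);
	}
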
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a760178..e57dae5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -221,14 +221,13 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
 	return put_count;
 }
 
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 			  bool interruptible,
 			  bool no_wait, bool use_sequence, uint32_t sequence)
 {
-	struct ttm_bo_global *glob = bo->glob;
 	int ret;
 
-	while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
+	while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
 		/**
 		 * Deadlock avoidance for multi-bo reserving.
 		 */
@@ -249,25 +248,36 @@ int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
 		if (no_wait)
 			return -EBUSY;
 
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_bo_wait_unreserved(bo, interruptible);
-		spin_lock(&glob->lru_lock);
 
 		if (unlikely(ret))
 			return ret;
 	}
 
 	if (use_sequence) {
+		bool wake_up = false;
 		/**
 		 * Wake up waiters that may need to recheck for deadlock,
 		 * if we decreased the sequence number.
 		 */
 		if (unlikely((bo->val_seq - sequence < (1 << 31))
 			     || !bo->seq_valid))
-			wake_up_all(&bo->event_queue);
+			wake_up = true;
 
+		/*
+		 * In the worst case with memory ordering these two writes can
+		 * be seen in the wrong order by another thread. Since we call
+		 * wake_up_all() in that case this should not pose a problem:
+		 * the worst outcome is that a waiter accidentally hits
+		 * -EAGAIN in ttm_bo_reserve when it sees an old value of
+		 * val_seq. That can only happen if seq_valid is seen as
+		 * written before val_seq is, and merely costs some slightly
+		 * increased cpu usage from the extra retry.
+		 */
 		bo->val_seq = sequence;
 		bo->seq_valid = true;
+		if (wake_up)
+			wake_up_all(&bo->event_queue);
 	} else {
 		bo->seq_valid = false;
 	}
@@ -298,14 +308,14 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 	WARN_ON(!atomic_read(&bo->kref.refcount));
 
-	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
+	ret = ttm_bo_reserve_nolru(bo, interruptible, no_wait, use_sequence,
 				    sequence);
-	if (likely(ret == 0))
+	if (likely(ret == 0)) {
+		spin_lock(&glob->lru_lock);
 		put_count = ttm_bo_del_from_lru(bo);
-	spin_unlock(&glob->lru_lock);
-
-	ttm_bo_list_ref_sub(bo, put_count, true);
+		spin_unlock(&glob->lru_lock);
+		ttm_bo_list_ref_sub(bo, put_count, true);
+	}
 
 	return ret;
 }
@@ -486,7 +496,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 	if (!ret) {
 		spin_lock(&bdev->fence_lock);
 		ret = ttm_bo_wait(bo, false, false, true);
@@ -631,8 +641,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_locked(entry, false, !remove_all,
-					    false, 0);
+		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		if (remove_all && ret) {
+			spin_unlock(&glob->lru_lock);
+			ret = ttm_bo_reserve_nolru(entry, false, false,
+						   false, 0);
+			spin_lock(&glob->lru_lock);
+		}
+
 		if (ret)
 			spin_unlock(&glob->lru_lock);
 		else
@@ -783,7 +799,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1763,7 +1779,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 		 * we slept.
 		 */
 
-		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 5490492..b3fe824 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -154,7 +154,7 @@ retry:
 
 		WARN_ON(!atomic_read(&bo->kref.refcount));
 retry_this_bo:
-		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
+		ret = ttm_bo_reserve_nolru(bo, true, true, true, val_seq);
 		switch (ret) {
 		case 0:
 			break;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 887c3c0..e9cdae1 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -793,15 +793,6 @@ extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
  * to make room for a buffer already reserved. (Buffers are reserved before
  * they are evicted). The following algorithm prevents such deadlocks from
  * occurring:
- * 1) Buffers are reserved with the lru spinlock held. Upon successful
- * reservation they are removed from the lru list. This stops a reserved buffer
- * from being evicted. However the lru spinlock is released between the time
- * a buffer is selected for eviction and the time it is reserved.
- * Therefore a check is made when a buffer is reserved for eviction, that it
- * is still the first buffer in the lru list, before it is removed from the
- * list. @check_lru == 1 forces this check. If it fails, the function returns
- * -EINVAL, and the caller should then choose a new buffer to evict and repeat
- * the procedure.
  * 2) Processes attempting to reserve multiple buffers other than for eviction,
  * (typically execbuf), should first obtain a unique 32-bit
  * validation sequence number,
@@ -835,7 +826,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
 
 
 /**
- * ttm_bo_reserve_locked:
+ * ttm_bo_reserve_nolru:
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @interruptible: Sleep interruptible if waiting.
@@ -843,9 +834,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * @use_sequence: If @bo is already reserved, Only sleep waiting for
  * it to become unreserved if @sequence < (@bo)->sequence.
  *
- * Must be called with struct ttm_bo_global::lru_lock held,
- * and will not remove reserved buffers from the lru lists.
- * The function may release the LRU spinlock if it needs to sleep.
+ * Will not remove reserved buffers from the lru lists.
  * Otherwise identical to ttm_bo_reserve.
  *
  * Returns:
@@ -858,7 +847,7 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
  * -EDEADLK: Bo already reserved using @sequence. This error code will only
  * be returned if @use_sequence is set to true.
  */
-extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+extern int ttm_bo_reserve_nolru(struct ttm_buffer_object *bo,
 				 bool interruptible,
 				 bool no_wait, bool use_sequence,
 				 uint32_t sequence);
-- 
1.8.0
