[PATCH] locking/mutex: Add waiter parameter to mutex_optimistic_spin()

From: Waiman Long <Waiman.Long@xxxxxxx>

This patch adds a new waiter parameter to the mutex_optimistic_spin()
function to prepare it for use by a waiter-spinner. A waiter-spinner
does not need to enter the OSQ, because there can be only one
waiter-spinner at a time: the waiter at the head of the wait queue.
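
As an illustration only (the actual waiter-side caller is introduced
in a follow-up patch, so the call site below is hypothetical), a
waiter at the head of the wait queue would invoke the spin path with
waiter set to true, skipping the OSQ entirely:

	/*
	 * Hypothetical call site inside __mutex_lock_common()'s wait
	 * loop: waiter == true bypasses mutex_can_spin_on_owner() and
	 * the OSQ, and sets the lock count to -1 on acquisition.
	 */
	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, true)) {
		/* Lock acquired while waiting; exit the slowpath. */
		break;
	}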

Signed-off-by: Waiman Long <Waiman.Long@xxxxxx>
---
 kernel/locking/mutex.c | 66 +++++++++++++++++++++++++++++++++++---------------
 1 file changed, 46 insertions(+), 20 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90d..875c925 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -273,11 +273,15 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 
 /*
  * Atomically try to take the lock when it is available
+ *
+ * For a waiter-spinner, the count needs to be set to -1 first; it will
+ * be cleared to 0 later on if the wait list becomes empty. For a
+ * regular spinner, the count will be set to 0.
  */
-static inline bool mutex_try_to_acquire(struct mutex *lock)
+static inline bool mutex_try_to_acquire(struct mutex *lock, int waiter)
 {
 	return !mutex_is_locked(lock) &&
-		(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
+		(atomic_cmpxchg_acquire(&lock->count, 1, waiter ? -1 : 0) == 1);
 }
 
 /*
@@ -302,22 +306,42 @@ static inline bool mutex_try_to_acquire(struct mutex *lock)
  *
  * Returns true when the lock was taken, otherwise false, indicating
  * that we need to jump to the slowpath and sleep.
+ *
+ * The waiter flag is set to true if the spinner is a waiter in the
+ * wait queue. As the waiter has already slept for a while, it should
+ * get priority over the regular spinners in acquiring the lock, so
+ * making it queue up at the tail of the OSQ would be unfair. Instead,
+ * the waiter spins on the lock directly, concurrently with the spinner
+ * at the head of the OSQ, if present. There may be a bit more
+ * cacheline contention in this case. The waiter also needs to set the
+ * lock count to -1 instead of 0 on lock acquisition.
  */
 static bool mutex_optimistic_spin(struct mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+				  struct ww_acquire_ctx *ww_ctx,
+				  const bool use_ww_ctx, int waiter)
 {
 	struct task_struct *task = current;
+	bool acquired = false;
 
-	if (!mutex_can_spin_on_owner(lock))
-		goto done;
+	if (!waiter) {
+		/*
+		 * The purpose of the mutex_can_spin_on_owner() function is
+		 * to eliminate the overhead of osq_lock() and osq_unlock()
+		 * in case spinning isn't possible. As a waiter-spinner
+		 * is not going to take OSQ lock anyway, there is no need
+		 * to call mutex_can_spin_on_owner().
+		 */
+		if (!mutex_can_spin_on_owner(lock))
+			goto done;
 
-	/*
-	 * In order to avoid a stampede of mutex spinners trying to
-	 * acquire the mutex all at once, the spinners need to take a
-	 * MCS (queued) lock first before spinning on the owner field.
-	 */
-	if (!osq_lock(&lock->osq))
-		goto done;
+		/*
+		 * In order to avoid a stampede of mutex spinners trying to
+		 * acquire the mutex all at once, the spinners need to take a
+		 * MCS (queued) lock first before spinning on the owner field.
+		 */
+		if (!osq_lock(&lock->osq))
+			goto done;
+	}
 
 	while (true) {
 		struct task_struct *owner;
@@ -347,7 +371,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 			break;
 
 		/* Try to acquire the mutex if it is unlocked. */
-		if (mutex_try_to_acquire(lock)) {
+		if (mutex_try_to_acquire(lock, waiter)) {
 			lock_acquired(&lock->dep_map, ip);
 
 			if (use_ww_ctx) {
@@ -358,8 +382,8 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 			}
 
 			mutex_set_owner(lock);
-			osq_unlock(&lock->osq);
-			return true;
+			acquired = true;
+			break;
 		}
 
 		/*
@@ -380,14 +404,15 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		cpu_relax_lowlatency();
 	}
 
-	osq_unlock(&lock->osq);
+	if (!waiter)
+		osq_unlock(&lock->osq);
 done:
 	/*
 	 * If we fell out of the spin path because of need_resched(),
 	 * reschedule now, before we try-lock the mutex. This avoids getting
 	 * scheduled out right after we obtained the mutex.
 	 */
-	if (need_resched()) {
+	if (!acquired && need_resched()) {
 		/*
 		 * We _should_ have TASK_RUNNING here, but just in case
 		 * we do not, make it so, otherwise we might get stuck.
@@ -396,11 +421,12 @@ done:
 		schedule_preempt_disabled();
 	}
 
-	return false;
+	return acquired;
 }
 #else
 static bool mutex_optimistic_spin(struct mutex *lock,
-				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+				  struct ww_acquire_ctx *ww_ctx,
+				  const bool use_ww_ctx, int waiter)
 {
 	return false;
 }
@@ -520,7 +546,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	preempt_disable();
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
-	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
+	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
 		/* got the lock, yay! */
 		preempt_enable();
 		return 0;
-- 
2.5.0
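
The 1/0/-1 count encoding above follows the legacy mutex fastpath
convention (1 == unlocked, 0 == locked, negative == locked with
possible waiters). A minimal userspace sketch (not kernel code),
assuming that convention, of the transition performed by
mutex_try_to_acquire(), with a C11 compare-exchange standing in for
atomic_cmpxchg_acquire():

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* 1: unlocked, 0: locked, -1: locked with possible waiters */
	static atomic_int count = 1;

	static bool try_to_acquire(int waiter)
	{
		int unlocked = 1;

		/* Succeeds only if count was 1 (unlocked). */
		return atomic_compare_exchange_strong_explicit(
				&count, &unlocked, waiter ? -1 : 0,
				memory_order_acquire, memory_order_relaxed);
	}

	int main(void)
	{
		printf("waiter acquire: %d (count now %d)\n",
		       try_to_acquire(1), atomic_load(&count));
		printf("second acquire fails: %d\n", try_to_acquire(0));
		return 0;
	}

A waiter-spinner stores -1 so that a later unlock falls through to the
slowpath and wakes the remaining waiters; a regular spinner stores 0.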
