[PATCH RFC 3/4] lockref: rework CMPXCHG_LOOP to handle contention better

In a later patch, we want to change the open(..., O_CREAT) codepath to
avoid taking the inode->i_rwsem for write when the dentry already exists.
When we tested that initially, performance degraded significantly due to
contention on the parent's d_lockref spinlock.

There are two problems with lockrefs today: First, once any task takes
the spinlock, every other concurrent task ends up taking the spinlock as
well, which is much more costly than a single cmpxchg operation. Second,
once a task has failed its cmpxchg 100 times, it falls back to the
spinlock. The upshot is that even moderate contention can cause a
fallback to serialized spinlocking, which worsens performance.
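
For reference, the current fast path boils down to roughly the following
(a simplified, hand-expanded sketch of the existing CMPXCHG_LOOP macro,
using lockref_get() as the example; not the exact code):

	struct lockref old, new;
	int retry = 100;

	old.lock_count = READ_ONCE(lockref->lock_count);
	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
		new = old;
		new.count++;			/* per-operation CODE step */
		if (try_cmpxchg64_relaxed(&lockref->lock_count,
					  &old.lock_count, new.lock_count))
			return;			/* per-operation SUCCESS step */
		if (!--retry)
			break;			/* give up after 100 failed attempts */
	}
	/* fall back to serializing on the spinlock */
	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);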

This patch changes CMPXCHG_LOOP in two ways:

First, when the spinlock is held, spin instead of falling back to the
locked codepath. Once the lock is released, the task resumes its cmpxchg
loop as before rather than taking the lock itself. Second, don't let the
cmpxchg loop give up after 100 retries; retry indefinitely.
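
In hand-expanded form, the reworked loop behaves roughly as follows
(again a simplified sketch using lockref_get(); not the exact macro):

	struct lockref old, new;

	old.lock_count = READ_ONCE(lockref->lock_count);
	for (;;) {
		if (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
			new = old;
			new.count++;		/* per-operation CODE step */
			if (try_cmpxchg64_relaxed(&lockref->lock_count,
						  &old.lock_count, new.lock_count))
				return;		/* per-operation SUCCESS step */
			/* cmpxchg failed; "old" was reloaded, so just retry */
		} else {
			/* lock is held: wait for it to be released, then retry */
			cpu_relax();
			old.lock_count = READ_ONCE(lockref->lock_count);
		}
	}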

This greatly reduces contention on the lockref when large numbers of
increments and decrements occur concurrently.

Signed-off-by: Jeff Layton <jlayton@xxxxxxxxxx>
---
 lib/lockref.c | 85 ++++++++++++++++++++++-------------------------------------
 1 file changed, 32 insertions(+), 53 deletions(-)

diff --git a/lib/lockref.c b/lib/lockref.c
index 2afe4c5d8919..b76941043fe9 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -8,22 +8,25 @@
  * Note that the "cmpxchg()" reloads the "old" value for the
  * failure case.
  */
-#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
-	int retry = 100;							\
-	struct lockref old;							\
-	BUILD_BUG_ON(sizeof(old) != 8);						\
-	old.lock_count = READ_ONCE(lockref->lock_count);			\
-	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {  	\
-		struct lockref new = old;					\
-		CODE								\
-		if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,		\
-						 &old.lock_count,		\
-						 new.lock_count))) {		\
-			SUCCESS;						\
-		}								\
-		if (!--retry)							\
-			break;							\
-	}									\
+#define CMPXCHG_LOOP(CODE, SUCCESS) do {						\
+	struct lockref old;								\
+	BUILD_BUG_ON(sizeof(old) != 8);							\
+	old.lock_count = READ_ONCE(lockref->lock_count);				\
+	for (;;) {									\
+		struct lockref new = old;						\
+											\
+		if (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
+			CODE								\
+			if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,		\
+							 &old.lock_count,		\
+							 new.lock_count))) {		\
+				SUCCESS;						\
+			}								\
+		} else {								\
+			cpu_relax();							\
+			old.lock_count = READ_ONCE(lockref->lock_count);		\
+		}									\
+	}										\
 } while (0)
 
 #else
@@ -46,10 +49,8 @@ void lockref_get(struct lockref *lockref)
 	,
 		return;
 	);
-
-	spin_lock(&lockref->lock);
-	lockref->count++;
-	spin_unlock(&lockref->lock);
+	/* should never get here */
+	WARN_ON_ONCE(1);
 }
 EXPORT_SYMBOL(lockref_get);
 
@@ -60,8 +61,6 @@ EXPORT_SYMBOL(lockref_get);
  */
 int lockref_get_not_zero(struct lockref *lockref)
 {
-	int retval;
-
 	CMPXCHG_LOOP(
 		new.count++;
 		if (old.count <= 0)
@@ -69,15 +68,9 @@ int lockref_get_not_zero(struct lockref *lockref)
 	,
 		return 1;
 	);
-
-	spin_lock(&lockref->lock);
-	retval = 0;
-	if (lockref->count > 0) {
-		lockref->count++;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
+	/* should never get here */
+	WARN_ON_ONCE(1);
+	return -1;
 }
 EXPORT_SYMBOL(lockref_get_not_zero);
 
@@ -88,8 +81,6 @@ EXPORT_SYMBOL(lockref_get_not_zero);
  */
 int lockref_put_not_zero(struct lockref *lockref)
 {
-	int retval;
-
 	CMPXCHG_LOOP(
 		new.count--;
 		if (old.count <= 1)
@@ -97,15 +88,9 @@ int lockref_put_not_zero(struct lockref *lockref)
 	,
 		return 1;
 	);
-
-	spin_lock(&lockref->lock);
-	retval = 0;
-	if (lockref->count > 1) {
-		lockref->count--;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
+	/* should never get here */
+	WARN_ON_ONCE(1);
+	return -1;
 }
 EXPORT_SYMBOL(lockref_put_not_zero);
 
@@ -125,6 +110,8 @@ int lockref_put_return(struct lockref *lockref)
 	,
 		return new.count;
 	);
+	/* should never get here */
+	WARN_ON_ONCE(1);
 	return -1;
 }
 EXPORT_SYMBOL(lockref_put_return);
@@ -171,8 +158,6 @@ EXPORT_SYMBOL(lockref_mark_dead);
  */
 int lockref_get_not_dead(struct lockref *lockref)
 {
-	int retval;
-
 	CMPXCHG_LOOP(
 		new.count++;
 		if (old.count < 0)
@@ -180,14 +165,8 @@ int lockref_get_not_dead(struct lockref *lockref)
 	,
 		return 1;
 	);
-
-	spin_lock(&lockref->lock);
-	retval = 0;
-	if (lockref->count >= 0) {
-		lockref->count++;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
+	/* should never get here */
+	WARN_ON_ONCE(1);
+	return -1;
 }
 EXPORT_SYMBOL(lockref_get_not_dead);

-- 
2.45.2




