[patch 1/2] kernel: introduce brlock

This second patchset scales the vfsmount lock. When it was last posted,
you were worried about the commenting of lock requirements and about the
impact on the slowpath. I have added comments and also done some slowpath
measurements.

--
brlock: introduce special brlocks

This patch introduces special brlocks. These can only be used as global
locks, and they use some preprocessor trickery to allow us to retain a
more optimal per-cpu lock implementation. We don't bother working around
lockdep yet; with lockdep enabled, they fall back to a plain global
spinlock.
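
As a sketch of the intended usage (the "xyz" name below is purely
illustrative, not part of this patch):

#include <linux/brlock.h>

/* In a header, visible to all users of the lock: */
DECLARE_BRLOCK(xyz);

/* In exactly one .c file: */
DEFINE_BRLOCK(xyz);

void example(void)
{
	xyz_lock_init();	/* once, at boot/init time */

	xyz_rlock();		/* read side: takes this CPU's lock only */
	/* ... read-side critical section ... */
	xyz_runlock();

	xyz_wlock();		/* write side: takes every CPU's lock */
	/* ... write-side critical section ... */
	xyz_wunlock();
}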

The other thing we could do in the future is a really neat atomic-free
implementation, like Dave M did for the old brlocks, so we might actually
be able to speed up the single-threaded path for these things.
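
Roughly, such a scheme could look like the sketch below. This is not part
of this patch and is only loosely modelled on the old 2.4-era fast path:
readers do plain per-cpu stores plus a full barrier, the writer sets a
global flag and waits for all per-cpu reader flags to drain. Writer-vs-
writer exclusion is elided. The point is that the read side then needs no
atomic read-modify-write at all.

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <asm/processor.h>	/* cpu_relax() */
#include <asm/system.h>		/* smp_mb() */

/* Sketch only, NOT part of this patch. */
static DEFINE_PER_CPU(int, reading);
static int writing;

static inline void sketch_rlock(void)
{
	preempt_disable();
	for (;;) {
		__get_cpu_var(reading) = 1;
		smp_mb();	/* store "reading" before load of "writing" */
		if (likely(!writing))
			return;
		__get_cpu_var(reading) = 0;	/* back off, let the writer in */
		while (writing)
			cpu_relax();
	}
}

static inline void sketch_runlock(void)
{
	smp_mb();	/* critical section before clearing "reading" */
	__get_cpu_var(reading) = 0;
	preempt_enable();
}

static void sketch_wlock(void)
{
	int i;

	/* a writer-vs-writer lock would be taken here */
	writing = 1;
	smp_mb();	/* store "writing" before loads of "reading" */
	for_each_possible_cpu(i) {
		while (per_cpu(reading, i))
			cpu_relax();
	}
}

static void sketch_wunlock(void)
{
	smp_mb();	/* critical section before clearing "writing" */
	writing = 0;
}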

Signed-off-by: Nick Piggin <npiggin@xxxxxxx>
---
 include/linux/brlock.h |  121 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 121 insertions(+)

Index: linux-2.6/include/linux/brlock.h
===================================================================
--- /dev/null
+++ linux-2.6/include/linux/brlock.h
@@ -0,0 +1,121 @@
+/*
+ * Specialised big-reader spinlock. Can only be declared as global variables
+ * to avoid overhead and keep things simple (and we don't want to start using
+ * these inside dynamically allocated structures).
+ *
+ * Copyright 2009, Nick Piggin, Novell Inc.
+ */
+#ifndef __LINUX_BRLOCK_H
+#define __LINUX_BRLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_LOCKDEP)
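+/*
+ * The read lock takes only the current CPU's spinlock: get_cpu_var
+ * disables preemption until the lock is acquired, and the held
+ * spinlock then keeps the task pinned to this CPU, which is why the
+ * unlock side can safely use __get_cpu_var. The write lock takes
+ * every online CPU's lock, in CPU order, to exclude all readers and
+ * any other writer.
+ */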
+#define DECLARE_BRLOCK(name)						\
+ DECLARE_PER_CPU(spinlock_t, name##_lock);				\
+ extern void name##_lock_init(void);					\
+ static inline void name##_rlock(void) {				\
+	spinlock_t *lock;						\
+	lock = &get_cpu_var(name##_lock);				\
+	spin_lock(lock);						\
+	put_cpu_var(name##_lock);					\
+ }									\
+ static inline void name##_runlock(void) {				\
+	spinlock_t *lock;						\
+	lock = &__get_cpu_var(name##_lock);				\
+	spin_unlock(lock);						\
+ }									\
+ extern void name##_wlock(void);					\
+ extern void name##_wunlock(void);					\
+ static inline int name##_atomic_dec_and_rlock(atomic_t *a) {		\
+	int ret;							\
+	spinlock_t *lock;						\
+	lock = &get_cpu_var(name##_lock);				\
+	ret = atomic_dec_and_lock(a, lock);				\
+	put_cpu_var(name##_lock);					\
+	return ret;							\
+ }									\
+ extern int name##_atomic_dec_and_wlock__failed(atomic_t *a);		\
+ static inline int name##_atomic_dec_and_wlock(atomic_t *a) {		\
+	if (atomic_add_unless(a, -1, 1))				\
+		return 0;						\
+	return name##_atomic_dec_and_wlock__failed(a);			\
+ }
+
+#define DEFINE_BRLOCK(name)						\
+ DEFINE_PER_CPU(spinlock_t, name##_lock);				\
+ void name##_lock_init(void) {						\
+	int i;								\
+	for_each_possible_cpu(i) {					\
+		spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		spin_lock_init(lock);					\
+	}								\
+ }									\
+ void name##_wlock(void) {						\
+	int i;								\
+	for_each_online_cpu(i) {					\
+		spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		spin_lock(lock);					\
+	}								\
+ }									\
+ void name##_wunlock(void) {						\
+	int i;								\
+	for_each_online_cpu(i) {					\
+		spinlock_t *lock;					\
+		lock = &per_cpu(name##_lock, i);			\
+		spin_unlock(lock);					\
+	}								\
+ }									\
+ int name##_atomic_dec_and_wlock__failed(atomic_t *a) {			\
+	name##_wlock();							\
+	if (!atomic_dec_and_test(a)) {					\
+		name##_wunlock();					\
+		return 0;						\
+	}								\
+	return 1;							\
+ }
+
+#else
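+/* UP, or lockdep enabled: fall back to a single global spinlock. */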
+
+#define DECLARE_BRLOCK(name)						\
+ extern spinlock_t name##_lock;						\
+ static inline void name##_lock_init(void) {				\
+	spin_lock_init(&name##_lock);					\
+ }									\
+ static inline void name##_rlock(void) {				\
+	spin_lock(&name##_lock);					\
+ }									\
+ static inline void name##_runlock(void) {				\
+	spin_unlock(&name##_lock);					\
+ }									\
+ static inline void name##_wlock(void) {				\
+	spin_lock(&name##_lock);					\
+ }									\
+ static inline void name##_wunlock(void) {				\
+	spin_unlock(&name##_lock);					\
+ }									\
+ static inline int name##_atomic_dec_and_rlock(atomic_t *a) {		\
+	return atomic_dec_and_lock(a, &name##_lock);			\
+ }									\
+ static inline int name##_atomic_dec_and_wlock(atomic_t *a) {		\
+	return atomic_dec_and_lock(a, &name##_lock);			\
+ }
+
+#define DEFINE_BRLOCK(name)						\
+ spinlock_t name##_lock
+#endif
+
+#endif