+ lock-validator-v3.patch added to -mm tree

The patch titled

     lock validator: -V3

has been added to the -mm tree.  Its filename is

     lock-validator-v3.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this

------------------------------------------------------
Subject: lock validator: -V3
From: Ingo Molnar <mingo@xxxxxxx>


This patch contains items that had to be done in one go, so I guess I'll
call it lock validator -V3, for lack of a better name.  (This goes to the
tail of the current lock validator queue in -mm.)

Changes:

Added a 'detect if freed' (or reinitialized/destroyed while held) feature
for all the lock types covered by the validator: spinlocks, rwlocks,
mutexes and rwsems - all using the same central lock-debugging code.  This
feature was previously available only for mutexes; it is now part of the
generic 'lock debugging' code and hence extends to all lock types.
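
For illustration, the class of bug this catches looks like this
(hypothetical driver code, not from this patch - the same applies to
rwlocks, mutexes and rwsems):

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct my_dev {
		spinlock_t	lock;
		int		users;
	};

	static void buggy_teardown(struct my_dev *dev)
	{
		spin_lock(&dev->lock);
		dev->users = 0;
		/*
		 * BUG: frees memory that still contains a held lock.
		 * With CONFIG_DEBUG_SPINLOCK_ALLOC=y the validator
		 * prints "[ BUG: held lock freed! ]" together with
		 * the list of locks held by the task.
		 */
		kfree(dev);
	}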

Accordingly, new lock debugging options are now available:

 CONFIG_DEBUG_SPINLOCK_ALLOC=y
 CONFIG_DEBUG_RWLOCK_ALLOC=y
 CONFIG_DEBUG_MUTEX_ALLOC=y
 CONFIG_DEBUG_RWSEM_ALLOC=y
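
These map onto the new "check" argument to lock_acquire().  E.g. for
spinlocks, spin_acquire() now maps to (excerpt from the
include/linux/lockdep.h changes below):

	# ifdef CONFIG_PROVE_SPIN_LOCKING
	#  define spin_acquire(l, s, t, i)	lock_acquire(l, s, t, 0, 2, i)
	# else
	#  define spin_acquire(l, s, t, i)	lock_acquire(l, s, t, 0, 1, i)
	# endif

i.e. the *_ALLOC options request the simple checks (check == 1: freeing,
held-at-exit-time etc.), and the PROVE_* options upgrade that to full
validation (check == 2).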

Added 12 new testcases to the locking selftest, to make sure the new
features are working.

I unified all the lock debugging options: they are now structured as
'basic', 'free/exit checking' and 'full validation' levels, each level
depending on the previous one.  The lock types can still be debugged
independently of each other.
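
For spinlocks, for example, the levels now stack up as (abridged from
the lib/Kconfig.debug changes below):

	CONFIG_DEBUG_SPINLOCK		(basic checks)
	-> CONFIG_DEBUG_SPINLOCK_ALLOC	(free/exit checking, selects LOCKDEP)
	   -> CONFIG_PROVE_SPIN_LOCKING	(full validation)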

I also resurrected SysRq-D (print all locks) support.
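
(Assuming CONFIG_MAGIC_SYSRQ=y, this can be triggered via Alt-SysRq-D on
the console, or via:

	echo d > /proc/sysrq-trigger

which ends up calling debug_show_all_locks() - see the
drivers/char/sysrq.c hunk below.)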

All in all, the structure of the lock debugging code is a lot more
consistent now, and all the lock debugging features are available across
the spectrum of lock types.

I have tested this on x86 and x86_64, using various combinations of the
new (and old) lock debugging options. Works fine here.

Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
Cc: Arjan van de Ven <arjan@xxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 drivers/char/sysrq.c              |    1 
 include/asm-i386/rwsem.h          |    4 
 include/linux/debug_locks.h       |   19 +-
 include/linux/lockdep.h           |   73 +++++++--
 include/linux/mutex.h             |    6 
 include/linux/rwsem-spinlock.h    |    4 
 include/linux/rwsem.h             |    4 
 include/linux/sched.h             |    1 
 include/linux/spinlock.h          |   16 --
 include/linux/spinlock_types.h    |    8 -
 include/linux/spinlock_types_up.h |    8 -
 include/linux/spinlock_up.h       |    5 
 kernel/lockdep.c                  |  211 +++++++++++++++++++++++-----
 kernel/mutex-debug.c              |    6 
 kernel/mutex-debug.h              |    3 
 kernel/mutex-lockdep.h            |   40 -----
 kernel/mutex.c                    |   17 --
 kernel/mutex.h                    |    2 
 kernel/spinlock.c                 |   38 -----
 lib/Kconfig.debug                 |   98 +++++++++----
 lib/locking-selftest-mutex.h      |    6 
 lib/locking-selftest-rlock.h      |    9 +
 lib/locking-selftest-rsem.h       |    9 +
 lib/locking-selftest-spin.h       |    6 
 lib/locking-selftest-wlock.h      |    9 +
 lib/locking-selftest-wsem.h       |    9 +
 lib/locking-selftest.c            |  107 ++++++++++++--
 lib/rwsem-spinlock.c              |   10 -
 lib/rwsem.c                       |   10 -
 lib/spinlock_debug.c              |   36 ++++
 30 files changed, 545 insertions(+), 230 deletions(-)

diff -puN drivers/char/sysrq.c~lock-validator-v3 drivers/char/sysrq.c
--- devel/drivers/char/sysrq.c~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/drivers/char/sysrq.c	2006-06-06 13:25:54.000000000 -0700
@@ -153,7 +153,6 @@ static void sysrq_handle_showlocks(int k
 				struct tty_struct *tty)
 {
 	debug_show_all_locks();
-	print_lock_types();
 }
 
 static struct sysrq_key_op sysrq_showlocks_op = {
diff -puN include/asm-i386/rwsem.h~lock-validator-v3 include/asm-i386/rwsem.h
--- devel/include/asm-i386/rwsem.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/asm-i386/rwsem.h	2006-06-06 13:25:54.000000000 -0700
@@ -65,7 +65,7 @@ struct rw_semaphore {
 #if RWSEM_DEBUG
 	int			debug;
 #endif
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
 	struct lockdep_map dep_map;
 #endif
 };
@@ -79,7 +79,7 @@ struct rw_semaphore {
 #define __RWSEM_DEBUG_INIT	/* */
 #endif
 
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
 # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
 #else
 # define __RWSEM_DEP_MAP_INIT(lockname)
diff -puN include/linux/debug_locks.h~lock-validator-v3 include/linux/debug_locks.h
--- devel/include/linux/debug_locks.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/debug_locks.h	2006-06-06 13:25:54.000000000 -0700
@@ -41,22 +41,29 @@ extern int debug_locks_off(void);
 # define locking_selftest()	do { } while (0)
 #endif
 
-static inline void
-debug_check_no_locks_freed(const void *from, unsigned long len)
+#ifdef CONFIG_LOCKDEP
+extern void debug_show_all_locks(void);
+extern void debug_show_held_locks(struct task_struct *task);
+extern void debug_check_no_locks_freed(const void *from, unsigned long len);
+extern void debug_check_no_locks_held(struct task_struct *task);
+#else
+static inline void debug_show_all_locks(void)
 {
 }
 
-static inline void
-debug_check_no_locks_held(struct task_struct *task)
+static inline void debug_show_held_locks(struct task_struct *task)
 {
 }
 
-static inline void debug_show_all_locks(void)
+static inline void
+debug_check_no_locks_freed(const void *from, unsigned long len)
 {
 }
 
-static inline void debug_show_held_locks(struct task_struct *task)
+static inline void
+debug_check_no_locks_held(struct task_struct *task)
 {
 }
+#endif
 
 #endif
diff -puN include/linux/lockdep.h~lock-validator-v3 include/linux/lockdep.h
--- devel/include/linux/lockdep.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/lockdep.h	2006-06-06 13:25:54.000000000 -0700
@@ -178,6 +178,7 @@ struct held_lock {
 	int				irq_context;
 	int				trylock;
 	int				read;
+	int				check;
 	int				hardirqs_off;
 };
 
@@ -202,15 +203,32 @@ extern void lockdep_print_held_locks(str
 extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
 			     struct lockdep_type_key *key);
 
-extern void lockdep_acquire(struct lockdep_map *lock, unsigned int subtype,
-			    int trylock, int read, unsigned long ip);
+/*
+ * Acquire a lock.
+ *
+ * Values for "read":
+ *
+ *   0: exclusive (write) acquire
+ *   1: read-acquire (no recursion allowed)
+ *   2: read-acquire with same-instance recursion allowed
+ *
+ * Values for check:
+ *
+ *   0: disabled
+ *   1: simple checks (freeing, held-at-exit-time, etc.)
+ *   2: full validation
+ */
+extern void lock_acquire(struct lockdep_map *lock, unsigned int subtype,
+			 int trylock, int read, int check, unsigned long ip);
 
-extern void lockdep_release(struct lockdep_map *lock, int nested,
-			    unsigned long ip);
+extern void lock_release(struct lockdep_map *lock, int nested,
+			 unsigned long ip);
 
 # define INIT_LOCKDEP				.lockdep_recursion = 0,
 
 #else /* LOCKDEP */
+# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
+# define lock_release(l, n, i)			do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
 # define print_lock_types()			do { } while (0)
@@ -246,36 +264,55 @@ extern void early_boot_irqs_on(void);
  * Map the dependency ops to NOP or to real lockdep ops, depending
  * on the per lock-type debug mode:
  */
-#ifdef CONFIG_PROVE_SPIN_LOCKING
-# define spin_acquire(l, s, t, i)		lockdep_acquire(l, s, t, 0, i)
-# define spin_release(l, n, i)			lockdep_release(l, n, i)
+
+#ifdef CONFIG_DEBUG_SPINLOCK_ALLOC
+# ifdef CONFIG_PROVE_SPIN_LOCKING
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+# else
+#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define spin_release(l, n, i)			lock_release(l, n, i)
 #else
 # define spin_acquire(l, s, t, i)		do { } while (0)
 # define spin_release(l, n, i)			do { } while (0)
 #endif
 
-#ifdef CONFIG_PROVE_RW_LOCKING
-# define rwlock_acquire(l, s, t, i)		lockdep_acquire(l, s, t, 0, i)
-# define rwlock_acquire_read(l, s, t, i)	lockdep_acquire(l, s, t, 1, i)
-# define rwlock_release(l, n, i)		lockdep_release(l, n, i)
+#ifdef CONFIG_DEBUG_RWLOCK_ALLOC
+# ifdef CONFIG_PROVE_RW_LOCKING
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
+# else
+#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
+# endif
+# define rwlock_release(l, n, i)		lock_release(l, n, i)
 #else
 # define rwlock_acquire(l, s, t, i)		do { } while (0)
 # define rwlock_acquire_read(l, s, t, i)	do { } while (0)
 # define rwlock_release(l, n, i)		do { } while (0)
 #endif
 
-#ifdef CONFIG_PROVE_MUTEX_LOCKING
-# define mutex_acquire(l, s, t, i)		lockdep_acquire(l, s, t, 0, i)
-# define mutex_release(l, n, i)			lockdep_release(l, n, i)
+#ifdef CONFIG_DEBUG_MUTEX_ALLOC
+# ifdef CONFIG_PROVE_MUTEX_LOCKING
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+# else
+#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+# endif
+# define mutex_release(l, n, i)			lock_release(l, n, i)
 #else
 # define mutex_acquire(l, s, t, i)		do { } while (0)
 # define mutex_release(l, n, i)			do { } while (0)
 #endif
 
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
-# define rwsem_acquire(l, s, t, i)		lockdep_acquire(l, s, t, 0, i)
-# define rwsem_acquire_read(l, s, t, i)		lockdep_acquire(l, s, t, -1, i)
-# define rwsem_release(l, n, i)			lockdep_release(l, n, i)
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
+# ifdef CONFIG_PROVE_RWSEM_LOCKING
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
+# else
+#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
+#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
+# endif
+# define rwsem_release(l, n, i)			lock_release(l, n, i)
 #else
 # define rwsem_acquire(l, s, t, i)		do { } while (0)
 # define rwsem_acquire_read(l, s, t, i)		do { } while (0)
diff -puN include/linux/mutex.h~lock-validator-v3 include/linux/mutex.h
--- devel/include/linux/mutex.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/mutex.h	2006-06-06 13:25:54.000000000 -0700
@@ -54,7 +54,7 @@ struct mutex {
 	const char 		*name;
 	void			*magic;
 #endif
-#ifdef CONFIG_PROVE_MUTEX_LOCKING
+#ifdef CONFIG_DEBUG_MUTEX_ALLOC
 	struct lockdep_map	dep_map;
 #endif
 };
@@ -85,7 +85,7 @@ do {							\
 # define mutex_destroy(mutex)				do { } while (0)
 #endif
 
-#ifdef CONFIG_PROVE_MUTEX_LOCKING
+#ifdef CONFIG_DEBUG_MUTEX_ALLOC
 # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
 		, .dep_map = { .name = #lockname }
 #else
@@ -125,7 +125,7 @@ static inline int fastcall mutex_is_lock
 extern void fastcall mutex_lock(struct mutex *lock);
 extern int fastcall mutex_lock_interruptible(struct mutex *lock);
 
-#ifdef CONFIG_PROVE_MUTEX_LOCKING
+#ifdef CONFIG_DEBUG_MUTEX_ALLOC
 extern void mutex_lock_nested(struct mutex *lock, unsigned int subtype);
 #else
 # define mutex_lock_nested(lock, subtype) mutex_lock(lock)
diff -puN include/linux/rwsem.h~lock-validator-v3 include/linux/rwsem.h
--- devel/include/linux/rwsem.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/rwsem.h	2006-06-06 13:25:54.000000000 -0700
@@ -30,7 +30,7 @@ struct rw_semaphore;
  * Lockdep: type splitting can also be done for dynamic locks, if for
  * example there are per-CPU dynamically allocated locks:
  */
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
 #define init_rwsem_key(sem, key)				\
 	__init_rwsem((sem), #sem, key)
 #else
@@ -100,7 +100,7 @@ static inline void down_write(struct rw_
 /*
  * lock for writing
  */
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
 static inline void down_write_nested(struct rw_semaphore *sem, int subtype)
 {
 	might_sleep();
diff -puN include/linux/rwsem-spinlock.h~lock-validator-v3 include/linux/rwsem-spinlock.h
--- devel/include/linux/rwsem-spinlock.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/rwsem-spinlock.h	2006-06-06 13:25:54.000000000 -0700
@@ -35,7 +35,7 @@ struct rw_semaphore {
 #if RWSEM_DEBUG
 	int			debug;
 #endif
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
 	struct lockdep_map dep_map;
 #endif
 };
@@ -49,7 +49,7 @@ struct rw_semaphore {
 #define __RWSEM_DEBUG_INIT	/* */
 #endif
 
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
 # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
 #else
 # define __RWSEM_DEP_MAP_INIT(lockname)
diff -puN include/linux/sched.h~lock-validator-v3 include/linux/sched.h
--- devel/include/linux/sched.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/sched.h	2006-06-06 13:25:54.000000000 -0700
@@ -940,6 +940,7 @@ struct task_struct {
 	struct held_lock held_locks[MAX_LOCK_DEPTH];
 #endif
 	unsigned int lockdep_recursion;
+	int lockdep_verbose;
 
 /* journalling filesystem info */
 	void *journal_info;
diff -puN include/linux/spinlock.h~lock-validator-v3 include/linux/spinlock.h
--- devel/include/linux/spinlock.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/spinlock.h	2006-06-06 13:25:54.000000000 -0700
@@ -88,7 +88,7 @@ extern int __lockfunc generic__raw_read_
 # include <linux/spinlock_up.h>
 #endif
 
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PROVE_SPIN_LOCKING)
+#ifdef CONFIG_DEBUG_SPINLOCK
   extern void __spin_lock_init(spinlock_t *lock, const char *name,
 			       struct lockdep_type_key *key);
 # define spin_lock_init(lock)					\
@@ -123,7 +123,7 @@ do {								\
 	do { spin_lock_init(lock); (void)(key); } while (0)
 #endif
 
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PROVE_RW_LOCKING)
+#ifdef CONFIG_DEBUG_SPINLOCK
   extern void __rwlock_init(rwlock_t *lock, const char *name,
 			    struct lockdep_type_key *key);
 # define rwlock_init(lock)					\
@@ -152,9 +152,7 @@ do {								\
 /*
  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
  */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
-	defined(CONFIG_PROVE_SPIN_LOCKING) || \
-	defined(CONFIG_PROVE_RW_LOCKING)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 # include <linux/spinlock_api_smp.h>
 #else
 # include <linux/spinlock_api_up.h>
@@ -203,9 +201,7 @@ do {								\
 #define write_lock(lock)		_write_lock(lock)
 #define read_lock(lock)			_read_lock(lock)
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
-	defined(CONFIG_PROVE_SPIN_LOCKING) || \
-	defined(CONFIG_PROVE_RW_LOCKING)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 #define spin_lock_irqsave(lock, flags)	flags = _spin_lock_irqsave(lock)
 #define read_lock_irqsave(lock, flags)	flags = _read_lock_irqsave(lock)
 #define write_lock_irqsave(lock, flags)	flags = _write_lock_irqsave(lock)
@@ -228,9 +224,7 @@ do {								\
  * We inline the unlock functions in the nondebug case:
  */
 #if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
-	!defined(CONFIG_SMP) || \
-	defined(CONFIG_PROVE_SPIN_LOCKING) || \
-	defined(CONFIG_PROVE_RW_LOCKING)
+	!defined(CONFIG_SMP)
 # define spin_unlock(lock)		_spin_unlock(lock)
 # define spin_unlock_non_nested(lock)	_spin_unlock_non_nested(lock)
 # define read_unlock(lock)		_read_unlock(lock)
diff -puN include/linux/spinlock_types.h~lock-validator-v3 include/linux/spinlock_types.h
--- devel/include/linux/spinlock_types.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/spinlock_types.h	2006-06-06 13:25:54.000000000 -0700
@@ -26,7 +26,7 @@ typedef struct {
 	unsigned int magic, owner_cpu;
 	void *owner;
 #endif
-#ifdef CONFIG_PROVE_SPIN_LOCKING
+#ifdef CONFIG_DEBUG_SPINLOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
 } spinlock_t;
@@ -42,7 +42,7 @@ typedef struct {
 	unsigned int magic, owner_cpu;
 	void *owner;
 #endif
-#ifdef CONFIG_PROVE_RW_LOCKING
+#ifdef CONFIG_DEBUG_RWLOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
 } rwlock_t;
@@ -51,13 +51,13 @@ typedef struct {
 
 #define SPINLOCK_OWNER_INIT	((void *)-1L)
 
-#ifdef CONFIG_PROVE_SPIN_LOCKING
+#ifdef CONFIG_DEBUG_SPINLOCK_ALLOC
 # define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
 #else
 # define SPIN_DEP_MAP_INIT(lockname)
 #endif
 
-#ifdef CONFIG_PROVE_RW_LOCKING
+#ifdef CONFIG_DEBUG_RWLOCK_ALLOC
 # define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
 #else
 # define RW_DEP_MAP_INIT(lockname)
diff -puN include/linux/spinlock_types_up.h~lock-validator-v3 include/linux/spinlock_types_up.h
--- devel/include/linux/spinlock_types_up.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/spinlock_types_up.h	2006-06-06 13:25:54.000000000 -0700
@@ -13,12 +13,12 @@
  */
 
 #if defined(CONFIG_DEBUG_SPINLOCK) || \
-	defined(CONFIG_PROVE_SPIN_LOCKING) || \
-	defined(CONFIG_PROVE_RW_LOCKING)
+	defined(CONFIG_DEBUG_SPINLOCK_ALLOC) || \
+	defined(CONFIG_DEBUG_RWLOCK_ALLOC)
 
 typedef struct {
 	volatile unsigned int slock;
-#ifdef CONFIG_PROVE_SPIN_LOCKING
+#ifdef CONFIG_DEBUG_SPINLOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
 } raw_spinlock_t;
@@ -35,7 +35,7 @@ typedef struct { } raw_spinlock_t;
 
 typedef struct {
 	/* no debug version on UP */
-#ifdef CONFIG_PROVE_RW_LOCKING
+#ifdef CONFIG_DEBUG_RWLOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
 } raw_rwlock_t;
diff -puN include/linux/spinlock_up.h~lock-validator-v3 include/linux/spinlock_up.h
--- devel/include/linux/spinlock_up.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/include/linux/spinlock_up.h	2006-06-06 13:25:54.000000000 -0700
@@ -17,10 +17,7 @@
  * No atomicity anywhere, we are on UP.
  */
 
-#if defined(CONFIG_DEBUG_SPINLOCK) || \
-	defined(CONFIG_PROVE_SPIN_LOCKING) || \
-	defined(CONFIG_PROVE_RW_LOCKING)
-
+#ifdef CONFIG_DEBUG_SPINLOCK
 #define __raw_spin_is_locked(x)		((x)->slock == 0)
 
 static inline void __raw_spin_lock(raw_spinlock_t *lock)
diff -puN kernel/lockdep.c~lock-validator-v3 kernel/lockdep.c
--- devel/kernel/lockdep.c~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/kernel/lockdep.c	2006-06-06 13:25:54.000000000 -0700
@@ -164,6 +164,8 @@ static int verbose(struct lock_type *typ
 	return 0;
 }
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+
 static int hardirq_verbose(struct lock_type *type)
 {
 #if HARDIRQ_VERBOSE
@@ -180,6 +182,8 @@ static int softirq_verbose(struct lock_t
 	return 0;
 }
 
+#endif
+
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
@@ -390,16 +394,16 @@ static void print_lock(struct held_lock 
 
 void lockdep_print_held_locks(struct task_struct *curr)
 {
-	int i;
+	int i, depth = curr->lockdep_depth;
 
-	if (!curr->lockdep_depth) {
+	if (!depth) {
 		printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
 		return;
 	}
-	printk("%d locks held by %s/%d:\n",
-		curr->lockdep_depth, curr->comm, curr->pid);
+	printk("%d lock%s held by %s/%d:\n",
+		depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
 
-	for (i = 0; i < curr->lockdep_depth; i++) {
+	for (i = 0; i < depth; i++) {
 		printk(" #%d: ", i);
 		print_lock(curr->held_locks + i);
 	}
@@ -889,7 +893,7 @@ check_deadlock(struct task_struct *curr,
 		 * Allow read-after-read recursion of the same
 		 * lock instance (i.e. read_lock(lock)+read_lock(lock)):
 		 */
-		if ((read > 0) && prev->read &&
+		if ((read == 2) && prev->read &&
 				(prev->instance == next_instance))
 			return 2;
 		return print_deadlock_bug(curr, prev, next);
@@ -988,7 +992,7 @@ check_prev_add(struct task_struct *curr,
 	 * write-lock never takes any other locks, then the reads are
 	 * equivalent to a NOP.
 	 */
-	if (next->read == 1 || prev->read == 1)
+	if (next->read == 2 || prev->read == 2)
 		return 1;
 	/*
 	 * Is the <prev> -> <next> dependency already present?
@@ -1997,9 +2001,9 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
  */
-static int __lockdep_acquire(struct lockdep_map *lock, unsigned int subtype,
-			     int trylock, int read, int hardirqs_off,
-			     unsigned long ip)
+static int __lock_acquire(struct lockdep_map *lock, unsigned int subtype,
+			  int trylock, int read, int check, int hardirqs_off,
+			  unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct held_lock *hlock;
@@ -2046,8 +2050,11 @@ static int __lockdep_acquire(struct lock
 	hlock->instance = lock;
 	hlock->trylock = trylock;
 	hlock->read = read;
+	hlock->check = check;
 	hlock->hardirqs_off = hardirqs_off;
 
+	if (check != 2)
+		goto out_calc_hash;
 #ifdef CONFIG_TRACE_IRQFLAGS
 	/*
 	 * If non-trylock use in a hardirq or softirq context, then
@@ -2095,6 +2102,7 @@ static int __lockdep_acquire(struct lock
 	/* mark it as used: */
 	if (!mark_lock(curr, hlock, LOCK_USED, ip))
 		return 0;
+out_calc_hash:
 	/*
	 * Calculate the chain hash: it's the combined hash of all the
 	 * lock keys along the dependency chain. We save the hash value
@@ -2152,7 +2160,7 @@ static int __lockdep_acquire(struct lock
 	 * (If lookup_chain_cache() returns with 1 it acquires
 	 * hash_lock for us)
 	 */
-	if (!trylock && lookup_chain_cache(chain_key)) {
+	if (!trylock && (check == 2) && lookup_chain_cache(chain_key)) {
 		/*
 		 * Check whether last held lock:
 		 *
@@ -2205,7 +2213,8 @@ static int
 print_unlock_order_bug(struct task_struct *curr, struct lockdep_map *lock,
 		       struct held_lock *hlock, unsigned long ip)
 {
-	debug_locks_off();
+	if (!debug_locks_off())
+		return 0;
 	if (debug_locks_silent)
 		return 0;
 
@@ -2235,7 +2244,8 @@ static int
 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 			   unsigned long ip)
 {
-	debug_locks_off();
+	if (!debug_locks_off())
+		return 0;
 	if (debug_locks_silent)
 		return 0;
 
@@ -2278,11 +2288,11 @@ static int check_unlock(struct task_stru
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
  * relatively rare operation, as all the unlock APIs default
- * to nested mode (which uses lockdep_release()):
+ * to nested mode (which uses lock_release()):
  */
 static int
-lockdep_release_non_nested(struct task_struct *curr,
-			   struct lockdep_map *lock, unsigned long ip)
+lock_release_non_nested(struct task_struct *curr,
+			struct lockdep_map *lock, unsigned long ip)
 {
 	struct held_lock *hlock, *prev_hlock;
 	unsigned int depth;
@@ -2321,9 +2331,9 @@ found_it:
 
 	for (i++; i < depth; i++) {
 		hlock = curr->held_locks + i;
-		if (!__lockdep_acquire(hlock->instance,
+		if (!__lock_acquire(hlock->instance,
 			hlock->type->subtype, hlock->trylock,
-				hlock->read, hlock->hardirqs_off,
+				hlock->read, hlock->check, hlock->hardirqs_off,
 				hlock->acquire_ip))
 			return 0;
 	}
@@ -2339,8 +2349,8 @@ found_it:
  * mutex_lock_interruptible()). This is done for unlocks that nest
  * perfectly. (i.e. the current top of the lock-stack is unlocked)
  */
-static int lockdep_release_nested(struct task_struct *curr,
-				  struct lockdep_map *lock, unsigned long ip)
+static int lock_release_nested(struct task_struct *curr,
+			       struct lockdep_map *lock, unsigned long ip)
 {
 	struct held_lock *hlock;
 	unsigned int depth;
@@ -2358,7 +2368,7 @@ static int lockdep_release_nested(struct
 #ifdef CONFIG_DEBUG_NON_NESTED_UNLOCKS
 		return print_unlock_order_bug(curr, lock, hlock, ip);
 #else
-		return lockdep_release_non_nested(curr, lock, ip);
+		return lock_release_non_nested(curr, lock, ip);
 #endif
 	}
 	curr->lockdep_depth--;
@@ -2383,8 +2393,8 @@ static int lockdep_release_nested(struct
  * mutex_lock_interruptible()). This is done for unlocks that nest
  * perfectly. (i.e. the current top of the lock-stack is unlocked)
  */
-static void __lockdep_release(struct lockdep_map *lock, int nested,
-			      unsigned long ip)
+static void
+__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
 	struct task_struct *curr = current;
 
@@ -2392,10 +2402,10 @@ static void __lockdep_release(struct loc
 		return;
 
 	if (nested) {
-		if (!lockdep_release_nested(curr, lock, ip))
+		if (!lock_release_nested(curr, lock, ip))
 			return;
 	} else {
-		if (!lockdep_release_non_nested(curr, lock, ip))
+		if (!lock_release_non_nested(curr, lock, ip))
 			return;
 	}
 
@@ -2437,8 +2447,8 @@ static void check_flags(unsigned long fl
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
  */
-void lockdep_acquire(struct lockdep_map *lock, unsigned int subtype,
-		     int trylock, int read, unsigned long ip)
+void lock_acquire(struct lockdep_map *lock, unsigned int subtype,
+		  int trylock, int read, int check, unsigned long ip)
 {
 	unsigned long flags;
 
@@ -2451,15 +2461,16 @@ void lockdep_acquire(struct lockdep_map 
 	if (unlikely(current->lockdep_recursion))
 		goto out;
 	current->lockdep_recursion = 1;
-	__lockdep_acquire(lock, subtype, trylock, read, irqs_disabled_flags(flags), ip);
+	__lock_acquire(lock, subtype, trylock, read, check,
+		       irqs_disabled_flags(flags), ip);
 	current->lockdep_recursion = 0;
 out:
 	raw_local_irq_restore(flags);
 }
 
-EXPORT_SYMBOL_GPL(lockdep_acquire);
+EXPORT_SYMBOL_GPL(lock_acquire);
 
-void lockdep_release(struct lockdep_map *lock, int nested, unsigned long ip)
+void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 {
 	unsigned long flags;
 
@@ -2471,13 +2482,13 @@ void lockdep_release(struct lockdep_map 
 	if (unlikely(current->lockdep_recursion))
 		goto out;
 	current->lockdep_recursion = 1;
-	__lockdep_release(lock, nested, ip);
+	__lock_release(lock, nested, ip);
 	current->lockdep_recursion = 0;
 out:
 	raw_local_irq_restore(flags);
 }
 
-EXPORT_SYMBOL_GPL(lockdep_release);
+EXPORT_SYMBOL_GPL(lock_release);
 
 /*
  * Used by the testsuite, sanitize the validator state
@@ -2648,3 +2659,139 @@ void __init lockdep_info(void)
 #endif
 }
 
+static inline int in_range(const void *start, const void *addr, const void *end)
+{
+	return addr >= start && addr <= end;
+}
+
+static void
+print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
+		     const void *mem_to)
+{
+	if (!debug_locks_off())
+		return;
+	if (debug_locks_silent)
+		return;
+
+	printk("\n=========================\n");
+	printk(  "[ BUG: held lock freed! ]\n");
+	printk(  "-------------------------\n");
+	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
+		curr->comm, curr->pid, mem_from, mem_to-1);
+	lockdep_print_held_locks(curr);
+
+	printk("\nstack backtrace:\n");
+	dump_stack();
+}
+
+/*
+ * Called when kernel memory is freed (or unmapped), or if a lock
+ * is destroyed or reinitialized - this code checks whether there is
+ * any held lock in the memory range of <from> to <to>:
+ */
+void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
+{
+	const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
+	struct task_struct *curr = current;
+	struct held_lock *hlock;
+	unsigned long flags;
+	int i;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	local_irq_save(flags);
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		hlock = curr->held_locks + i;
+
+		lock_from = (void *)hlock->instance;
+		lock_to = (void *)(hlock->instance + 1);
+
+		if (!in_range(mem_from, lock_from, mem_to) &&
+					!in_range(mem_from, lock_to, mem_to))
+			continue;
+
+		print_freed_lock_bug(curr, mem_from, mem_to);
+		break;
+	}
+	local_irq_restore(flags);
+}
+
+static void print_held_locks_bug(struct task_struct *curr)
+{
+	if (!debug_locks_off())
+		return;
+	if (debug_locks_silent)
+		return;
+
+	printk("\n=====================================\n");
+	printk(  "[ BUG: lock held at task exit time! ]\n");
+	printk(  "-------------------------------------\n");
+	printk("%s/%d is exiting with locks still held!\n",
+		curr->comm, curr->pid);
+	lockdep_print_held_locks(curr);
+
+	printk("\nstack backtrace:\n");
+	dump_stack();
+}
+
+void debug_check_no_locks_held(struct task_struct *task)
+{
+	if (unlikely(task->lockdep_depth > 0))
+		print_held_locks_bug(task);
+}
+
+void debug_show_all_locks(void)
+{
+	struct task_struct *g, *p;
+	int count = 10;
+	int unlock = 1;
+
+	printk("\nShowing all locks held in the system:\n");
+
+	/*
+	 * Here we try to get the tasklist_lock as hard as possible,
+	 * if not successful after 2 seconds we ignore it (but keep
+	 * trying). This is to enable a debug printout even if a
+	 * tasklist_lock-holding task deadlocks or crashes.
+	 */
+retry:
+	if (!read_trylock(&tasklist_lock)) {
+		if (count == 10)
+			printk("hm, tasklist_lock locked, retrying... ");
+		if (count) {
+			count--;
+			printk(" #%d", 10-count);
+			mdelay(200);
+			goto retry;
+		}
+		printk(" ignoring it.\n");
+		unlock = 0;
+	}
+	if (count != 10)
+		printk(" locked it.\n");
+
+	do_each_thread(g, p) {
+		if (p->lockdep_depth)
+			lockdep_print_held_locks(p);
+		if (!unlock)
+			if (read_trylock(&tasklist_lock))
+				unlock = 1;
+	} while_each_thread(g, p);
+
+	printk("\n");
+	printk("=============================================\n\n");
+
+	if (unlock)
+		read_unlock(&tasklist_lock);
+}
+
+EXPORT_SYMBOL_GPL(debug_show_all_locks);
+
+void debug_show_held_locks(struct task_struct *task)
+{
+	lockdep_print_held_locks(task);
+}
+
+EXPORT_SYMBOL_GPL(debug_show_held_locks);
+
diff -puN kernel/mutex.c~lock-validator-v3 kernel/mutex.c
--- devel/kernel/mutex.c~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/kernel/mutex.c	2006-06-06 13:25:54.000000000 -0700
@@ -27,13 +27,8 @@
 # include "mutex-debug.h"
 # include <asm-generic/mutex-null.h>
 #else
-# ifdef CONFIG_PROVE_MUTEX_LOCKING
-#  include "mutex-lockdep.h"
-#  include <asm-generic/mutex-null.h>
-# else
-#  include "mutex.h"
-#  include <asm/mutex.h>
-# endif
+# include "mutex.h"
+# include <asm/mutex.h>
 #endif
 
 /***
@@ -51,11 +46,7 @@ __mutex_init(struct mutex *lock, const c
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
 
-	debug_mutex_init(lock, name);
-
-#ifdef CONFIG_PROVE_MUTEX_LOCKING
-	lockdep_init_map(&lock->dep_map, name, key);
-#endif
+	debug_mutex_init(lock, name, key);
 }
 
 EXPORT_SYMBOL(__mutex_init);
@@ -225,7 +216,7 @@ __mutex_lock_slowpath(atomic_t *lock_cou
 	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
 }
 
-#ifdef CONFIG_PROVE_MUTEX_LOCKING
+#ifdef CONFIG_DEBUG_MUTEX_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subtype)
 {
diff -puN kernel/mutex-debug.c~lock-validator-v3 kernel/mutex-debug.c
--- devel/kernel/mutex-debug.c~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/kernel/mutex-debug.c	2006-06-06 13:25:54.000000000 -0700
@@ -83,12 +83,16 @@ void debug_mutex_unlock(struct mutex *lo
 	DEBUG_WARN_ON(lock->owner != current_thread_info());
 }
 
-void debug_mutex_init(struct mutex *lock, const char *name)
+void debug_mutex_init(struct mutex *lock, const char *name,
+		      struct lockdep_type_key *key)
 {
+#ifdef CONFIG_DEBUG_MUTEX_ALLOC
 	/*
 	 * Make sure we are not reinitializing a held lock:
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key);
+#endif
 	lock->owner = NULL;
 	lock->magic = lock;
 }
diff -puN kernel/mutex-debug.h~lock-validator-v3 kernel/mutex-debug.h
--- devel/kernel/mutex-debug.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/kernel/mutex-debug.h	2006-06-06 13:25:54.000000000 -0700
@@ -32,7 +32,8 @@ extern void debug_mutex_add_waiter(struc
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 				struct thread_info *ti);
 extern void debug_mutex_unlock(struct mutex *lock);
-extern void debug_mutex_init(struct mutex *lock, const char *name);
+extern void debug_mutex_init(struct mutex *lock, const char *name,
+			     struct lockdep_type_key *key);
 
 #define spin_lock_mutex(lock, flags)			\
 	do {						\
diff -puN kernel/mutex.h~lock-validator-v3 kernel/mutex.h
--- devel/kernel/mutex.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/kernel/mutex.h	2006-06-06 13:25:54.000000000 -0700
@@ -24,7 +24,7 @@
 #define debug_mutex_free_waiter(waiter)			do { } while (0)
 #define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
 #define debug_mutex_unlock(lock)			do { } while (0)
-#define debug_mutex_init(lock, name)			do { } while (0)
+#define debug_mutex_init(lock, name, key)		do { } while (0)
 
 static inline void
 debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
diff -L kernel/mutex-lockdep.h -puN kernel/mutex-lockdep.h~lock-validator-v3 /dev/null
--- devel/kernel/mutex-lockdep.h
+++ /dev/null	2006-06-03 22:34:36.282200750 -0700
@@ -1,40 +0,0 @@
-/*
- * Mutexes: blocking mutual exclusion locks
- *
- * started by Ingo Molnar:
- *
- *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@xxxxxxxxxx>
- *
- * This file contains mutex debugging related internal prototypes, for the
- * !CONFIG_DEBUG_MUTEXES && CONFIG_PROVE_MUTEX_LOCKING case. Most of
- * them are NOPs:
- */
-
-#define spin_lock_mutex(lock, flags)			\
-	do {						\
-		local_irq_save(flags);			\
-		__raw_spin_lock(&(lock)->raw_lock);	\
-	} while (0)
-
-#define spin_unlock_mutex(lock, flags)			\
-	do {						\
-		__raw_spin_unlock(&(lock)->raw_lock);	\
-		local_irq_restore(flags);		\
-	} while (0)
-
-#define mutex_remove_waiter(lock, waiter, ti) \
-		__list_del((waiter)->list.prev, (waiter)->list.next)
-
-#define debug_mutex_set_owner(lock, new_owner)		do { } while (0)
-#define debug_mutex_clear_owner(lock)			do { } while (0)
-#define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
-#define debug_mutex_free_waiter(waiter)			do { } while (0)
-#define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
-#define debug_mutex_unlock(lock)			do { } while (0)
-#define debug_mutex_init(lock, name)			do { } while (0)
-
-static inline void
-debug_mutex_lock_common(struct mutex *lock,
-			struct mutex_waiter *waiter)
-{
-}
diff -puN kernel/spinlock.c~lock-validator-v3 kernel/spinlock.c
--- devel/kernel/spinlock.c~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/kernel/spinlock.c	2006-06-06 13:25:54.000000000 -0700
@@ -17,44 +17,6 @@
 #include <linux/debug_locks.h>
 #include <linux/module.h>
 
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PROVE_SPIN_LOCKING)
-void __spin_lock_init(spinlock_t *lock, const char *name,
-		      struct lockdep_type_key *key)
-{
-	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	lock->magic = SPINLOCK_MAGIC;
-	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
-#endif
-#ifdef CONFIG_PROVE_SPIN_LOCKING
-	lockdep_init_map(&lock->dep_map, name, key);
-#endif
-}
-
-EXPORT_SYMBOL(__spin_lock_init);
-
-#endif
-
-#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PROVE_RW_LOCKING)
-
-void __rwlock_init(rwlock_t *lock, const char *name,
-		   struct lockdep_type_key *key)
-{
-	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
-#ifdef CONFIG_DEBUG_SPINLOCK
-	lock->magic = RWLOCK_MAGIC;
-	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
-#endif
-#ifdef CONFIG_PROVE_RW_LOCKING
-	lockdep_init_map(&lock->dep_map, name, key);
-#endif
-}
-
-EXPORT_SYMBOL(__rwlock_init);
-
-#endif
 /*
  * Generic declaration of the raw read_trylock() function,
  * architectures are supposed to optimize this:
diff -puN lib/Kconfig.debug~lock-validator-v3 lib/Kconfig.debug
--- devel/lib/Kconfig.debug~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/Kconfig.debug	2006-06-06 13:25:54.000000000 -0700
@@ -116,24 +116,6 @@ config DEBUG_PREEMPT
 	  if kernel code uses it in a preemption-unsafe way. Also, the kernel
 	  will detect preemption count underflows.
 
-config DEBUG_MUTEXES
-	bool "Mutex debugging, basic checks"
-	default y
-	depends on DEBUG_KERNEL
-	help
-	 This feature allows mutex semantics violations to be detected and
-	 reported.
-
-config DEBUG_MUTEX_ALLOC
-	bool "Detect incorrect freeing of live mutexes"
-	default y
-	depends on DEBUG_MUTEXES
-	help
-	 This feature will check whether any held mutex is incorrectly
-	 freed by the kernel, via any of the memory-freeing routines
-	 (kfree(), kmem_cache_free(), free_pages(), vfree(), etc.),
-	 or whether there is any lock held during task exit.
-
 config DEBUG_RT_MUTEXES
 	bool "RT Mutex debugging, deadlock detection"
 	default y
@@ -155,7 +137,7 @@ config RT_MUTEX_TESTER
 	  This option enables a rt-mutex tester.
 
 config DEBUG_SPINLOCK
-	bool "Spinlock debugging"
+	bool "Spinlock and rw-lock debugging: basic checks"
 	depends on DEBUG_KERNEL
 	help
 	  Say Y here and build SMP to catch missing spinlock initialization
@@ -163,9 +145,20 @@ config DEBUG_SPINLOCK
 	  best used in conjunction with the NMI watchdog so that spinlock
 	  deadlocks are also debuggable.
 
+config DEBUG_SPINLOCK_ALLOC
+	bool "Spinlock debugging: detect incorrect freeing of live spinlocks"
+	depends on DEBUG_SPINLOCK
+	select LOCKDEP
+	help
+	 This feature will check whether any held spinlock is incorrectly
+	 freed by the kernel, via any of the memory-freeing routines
+	 (kfree(), kmem_cache_free(), free_pages(), vfree(), etc.),
+	 whether a live spinlock is being reinitialized via spin_lock_init(),
+	 or whether there is any spinlock held during task exit.
+
 config PROVE_SPIN_LOCKING
-	bool "Prove spin-locking correctness"
-	depends on TRACE_IRQFLAGS_SUPPORT
+	bool "Spinlock debugging: prove spin-locking correctness"
+	depends on TRACE_IRQFLAGS_SUPPORT && DEBUG_SPINLOCK_ALLOC
 	default n
 	help
 	 This feature enables the kernel to prove that all spinlock
@@ -201,9 +194,20 @@ config PROVE_SPIN_LOCKING
 
 	 For more details, see Documentation/locking-correctness.txt.
 
+config DEBUG_RWLOCK_ALLOC
+	bool "rw-lock debugging: detect incorrect freeing of live rwlocks"
+	depends on DEBUG_SPINLOCK
+	select LOCKDEP
+	help
+	 This feature will check whether any held rwlock is incorrectly
+	 freed by the kernel, via any of the memory-freeing routines
+	 (kfree(), kmem_cache_free(), free_pages(), vfree(), etc.),
+	 whether a live rwlock is being reinitialized via rwlock_init(),
+	 or whether there is any rwlock held during task exit.
+
 config PROVE_RW_LOCKING
-	bool "Prove rw-locking correctness"
-	depends on TRACE_IRQFLAGS_SUPPORT
+	bool "rw-lock debugging: prove rw-locking correctness"
+	depends on TRACE_IRQFLAGS_SUPPORT && DEBUG_RWLOCK_ALLOC
 	default n
 	help
 	 This feature enables the kernel to prove that all rwlock
@@ -239,9 +243,28 @@ config PROVE_RW_LOCKING
 
 	 For more details, see Documentation/locking-correctness.txt.
 
+config DEBUG_MUTEXES
+	bool "Mutex debugging: basic checks"
+	default y
+	depends on DEBUG_KERNEL
+	help
+	 This feature allows mutex semantics violations to be detected and
+	 reported.
+
+config DEBUG_MUTEX_ALLOC
+	bool "Mutex debugging: detect incorrect freeing of live mutexes"
+	depends on DEBUG_MUTEXES
+	select LOCKDEP
+	help
+	 This feature will check whether any held mutex is incorrectly
+	 freed by the kernel, via any of the memory-freeing routines
+	 (kfree(), kmem_cache_free(), free_pages(), vfree(), etc.),
+	 whether a live mutex is being reinitialized via mutex_init(),
+	 or whether there is any mutex held during task exit.
+
 config PROVE_MUTEX_LOCKING
-	bool "Prove mutex-locking correctness"
-	depends on TRACE_IRQFLAGS_SUPPORT
+	bool "Mutex debugging: prove mutex-locking correctness"
+	depends on DEBUG_MUTEX_ALLOC
 	default n
 	help
 	 This feature enables the kernel to prove that all mutexlock
@@ -277,9 +300,27 @@ config PROVE_MUTEX_LOCKING
 
 	 For more details, see Documentation/locking-correctness.txt.
 
+config DEBUG_RWSEMS
+	bool "rwsem debugging: basic checks"
+	depends on DEBUG_KERNEL
+	help
+	 This feature allows read-write semaphore semantics violations to
+	 be detected and reported.
+
+config DEBUG_RWSEM_ALLOC
+	bool "rwsem debugging: detect incorrect freeing of live rwsems"
+	depends on DEBUG_RWSEMS
+	select LOCKDEP
+	help
+	 This feature will check whether any held rwsem is incorrectly
+	 freed by the kernel, via any of the memory-freeing routines
+	 (kfree(), kmem_cache_free(), free_pages(), vfree(), etc.),
+	 whether a live rwsem is being reinitialized via init_rwsem(),
+	 or whether there is any rwsem held during task exit.
+
 config PROVE_RWSEM_LOCKING
 	bool "Prove rwsem-locking correctness"
-	depends on TRACE_IRQFLAGS_SUPPORT
+	depends on DEBUG_RWSEM_ALLOC
 	default n
 	help
 	 This feature enables the kernel to prove that all rwsemlock
@@ -317,11 +358,9 @@ config PROVE_RWSEM_LOCKING
 
 config LOCKDEP
 	bool
-	default y
 	select FRAME_POINTER
 	select KALLSYMS
 	select KALLSYMS_ALL
-	depends on PROVE_SPIN_LOCKING || PROVE_RW_LOCKING || PROVE_MUTEX_LOCKING || PROVE_RWSEM_LOCKING
 
 config DEBUG_NON_NESTED_UNLOCKS
 	bool "Detect non-nested unlocks"
@@ -340,7 +379,6 @@ config DEBUG_LOCKDEP
 	bool "Lock dependency engine debugging"
 	depends on LOCKDEP
 	default y
-	depends on TRACE_IRQFLAGS_SUPPORT
 	help
 	  If you say Y here, the lock dependency engine will do
 	  additional runtime checks to debug itself, at the price
@@ -353,7 +391,7 @@ config TRACE_IRQFLAGS
 	depends on PROVE_SPIN_LOCKING || PROVE_RW_LOCKING
 
 config DEBUG_SPINLOCK_SLEEP
-	bool "Sleep-inside-spinlock checking"
+	bool "Spinlock debugging: sleep-inside-spinlock checking"
 	depends on DEBUG_KERNEL
 	help
 	  If you say Y here, various routines which may sleep will become very
diff -puN lib/locking-selftest.c~lock-validator-v3 lib/locking-selftest.c
--- devel/lib/locking-selftest.c~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/locking-selftest.c	2006-06-06 13:25:54.000000000 -0700
@@ -173,6 +173,7 @@ static void init_shared_types(void)
 #define U(x)			spin_unlock(&lock_##x)
 #define UNN(x)			spin_unlock_non_nested(&lock_##x)
 #define LU(x)			L(x); U(x)
+#define SI(x)			spin_lock_init(&lock_##x)
 
 #define WL(x)			write_lock(&rwlock_##x)
 #define WU(x)			write_unlock(&rwlock_##x)
@@ -182,10 +183,12 @@ static void init_shared_types(void)
 #define RU(x)			read_unlock(&rwlock_##x)
 #define RUNN(x)			read_unlock_non_nested(&rwlock_##x)
 #define RLU(x)			RL(x); RU(x)
+#define RWI(x)			rwlock_init(&rwlock_##x)
 
 #define ML(x)			mutex_lock(&mutex_##x)
 #define MU(x)			mutex_unlock(&mutex_##x)
 #define MUNN(x)			mutex_unlock_non_nested(&mutex_##x)
+#define MI(x)			mutex_init(&mutex_##x)
 
 #define WSL(x)			down_write(&rwsem_##x)
 #define WSU(x)			up_write(&rwsem_##x)
@@ -193,6 +196,7 @@ static void init_shared_types(void)
 #define RSL(x)			down_read(&rwsem_##x)
 #define RSU(x)			up_read(&rwsem_##x)
 #define RSUNN(x)		up_read_non_nested(&rwsem_##x)
+#define RWSI(x)			init_rwsem(&rwsem_##x)
 
 #define LOCK_UNLOCK_2(x,y)	LOCK(x); LOCK(y); UNLOCK(y); UNLOCK(x)
 
@@ -226,9 +230,7 @@ static void name##_321(void) { E3(); E2(
 #define E()					\
 						\
 	LOCK(X1);				\
-	LOCK(X2); /* this one should fail */	\
-	UNLOCK(X2);				\
-	UNLOCK(X1);
+	LOCK(X2); /* this one should fail */
 
 /*
  * 6 testcases:
@@ -256,16 +258,50 @@ static void rlock_AA1(void)
 {
 	RL(X1);
 	RL(X1); // this one should NOT fail
-	RU(X1);
-	RU(X1);
+}
+
+static void rlock_AA1B(void)
+{
+	RL(X1);
+	RL(X2); // this one should fail
 }
 
 static void rsem_AA1(void)
 {
 	RSL(X1);
 	RSL(X1); // this one should fail
-	RSU(X1);
-	RSU(X1);
+}
+
+static void rsem_AA1B(void)
+{
+	RSL(X1);
+	RSL(X2); // this one should fail
+}
+/*
+ * The mixing of read and write locks is not allowed:
+ */
+static void rlock_AA2(void)
+{
+	RL(X1);
+	WL(X2); // this one should fail
+}
+
+static void rsem_AA2(void)
+{
+	RSL(X1);
+	WSL(X2); // this one should fail
+}
+
+static void rlock_AA3(void)
+{
+	WL(X1);
+	RL(X2); // this one should fail
+}
+
+static void rsem_AA3(void)
+{
+	WSL(X1);
+	RSL(X2); // this one should fail
 }
 
 /*
@@ -491,6 +527,32 @@ GENERATE_TESTCASE(bad_unlock_order_rsem)
 
 #undef E
 
+/*
+ * initializing a held lock:
+ */
+#define E()					\
+						\
+	LOCK(A);				\
+	INIT(A); /* fail */
+
+/*
+ * 6 testcases:
+ */
+#include "locking-selftest-spin.h"
+GENERATE_TESTCASE(init_held_spin)
+#include "locking-selftest-wlock.h"
+GENERATE_TESTCASE(init_held_wlock)
+#include "locking-selftest-rlock.h"
+GENERATE_TESTCASE(init_held_rlock)
+#include "locking-selftest-mutex.h"
+GENERATE_TESTCASE(init_held_mutex)
+#include "locking-selftest-wsem.h"
+GENERATE_TESTCASE(init_held_wsem)
+#include "locking-selftest-rsem.h"
+GENERATE_TESTCASE(init_held_rsem)
+
+#undef E
+
 #ifdef CONFIG_LOCKDEP
 /*
  * bad unlock ordering - but using the _non_nested API,
@@ -871,25 +933,25 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
 #include "locking-selftest-softirq.h"
 // GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft)
 
-#ifdef CONFIG_PROVE_SPIN_LOCKING
+#ifdef CONFIG_DEBUG_SPINLOCK_ALLOC
 # define I_SPINLOCK(x)	lockdep_reset_lock(&lock_##x.dep_map)
 #else
 # define I_SPINLOCK(x)
 #endif
 
-#ifdef CONFIG_PROVE_RW_LOCKING
+#ifdef CONFIG_DEBUG_RWLOCK_ALLOC
 # define I_RWLOCK(x)	lockdep_reset_lock(&rwlock_##x.dep_map)
 #else
 # define I_RWLOCK(x)
 #endif
 
-#ifdef CONFIG_PROVE_MUTEX_LOCKING
+#ifdef CONFIG_DEBUG_MUTEX_ALLOC
 # define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
 #else
 # define I_MUTEX(x)
 #endif
 
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
 # define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
 #else
 # define I_RWSEM(x)
@@ -1139,6 +1201,7 @@ void locking_selftest(void)
 	DO_TESTCASE_6R("A-B-C-D-B-D-D-A deadlock", ABCDBDDA);
 	DO_TESTCASE_6R("A-B-C-D-B-C-D-A deadlock", ABCDBCDA);
 	DO_TESTCASE_6("double unlock", double_unlock);
+	DO_TESTCASE_6("initialize held", init_held);
 #ifdef CONFIG_DEBUG_NON_NESTED_UNLOCKS
 	DO_TESTCASE_6("bad unlock order", bad_unlock_order);
 #else
@@ -1153,8 +1216,30 @@ void locking_selftest(void)
 	dotest(rsem_AA1, FAILURE, LOCKTYPE_RWSEM);
 	printk("\n");
 
+	print_testname("recursive read-lock #2");
+	printk("             |");
+	dotest(rlock_AA1B, FAILURE, LOCKTYPE_RWLOCK);
+	printk("             |");
+	dotest(rsem_AA1B, FAILURE, LOCKTYPE_RWSEM);
+	printk("\n");
+
+	print_testname("mixed read-write-lock");
+	printk("             |");
+	dotest(rlock_AA2, FAILURE, LOCKTYPE_RWLOCK);
+	printk("             |");
+	dotest(rsem_AA2, FAILURE, LOCKTYPE_RWSEM);
+	printk("\n");
+
+	print_testname("mixed write-read-lock");
+	printk("             |");
+	dotest(rlock_AA3, FAILURE, LOCKTYPE_RWLOCK);
+	printk("             |");
+	dotest(rsem_AA3, FAILURE, LOCKTYPE_RWSEM);
+	printk("\n");
+
 	printk("  --------------------------------------------------------------------------\n");
 
+
 #ifdef CONFIG_LOCKDEP
 	print_testname("non-nested unlock");
 	dotest(spin_order_nn, SUCCESS, LOCKTYPE_SPIN);
diff -puN lib/locking-selftest-mutex.h~lock-validator-v3 lib/locking-selftest-mutex.h
--- devel/lib/locking-selftest-mutex.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/locking-selftest-mutex.h	2006-06-06 13:25:54.000000000 -0700
@@ -3,3 +3,9 @@
 
 #undef UNLOCK
 #define UNLOCK		MU
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT		MI
diff -puN lib/locking-selftest-rlock.h~lock-validator-v3 lib/locking-selftest-rlock.h
--- devel/lib/locking-selftest-rlock.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/locking-selftest-rlock.h	2006-06-06 13:25:54.000000000 -0700
@@ -3,3 +3,12 @@
 
 #undef UNLOCK
 #define UNLOCK		RU
+
+#undef RLOCK
+#define RLOCK		RL
+
+#undef WLOCK
+#define WLOCK		WL
+
+#undef INIT
+#define INIT		RWI
diff -puN lib/locking-selftest-rsem.h~lock-validator-v3 lib/locking-selftest-rsem.h
--- devel/lib/locking-selftest-rsem.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/locking-selftest-rsem.h	2006-06-06 13:25:54.000000000 -0700
@@ -3,3 +3,12 @@
 
 #undef UNLOCK
 #define UNLOCK		RSU
+
+#undef RLOCK
+#define RLOCK		RSL
+
+#undef WLOCK
+#define WLOCK		WSL
+
+#undef INIT
+#define INIT		RWSI
diff -puN lib/locking-selftest-spin.h~lock-validator-v3 lib/locking-selftest-spin.h
--- devel/lib/locking-selftest-spin.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/locking-selftest-spin.h	2006-06-06 13:25:54.000000000 -0700
@@ -3,3 +3,9 @@
 
 #undef UNLOCK
 #define UNLOCK		U
+
+#undef RLOCK
+#undef WLOCK
+
+#undef INIT
+#define INIT		SI
diff -puN lib/locking-selftest-wlock.h~lock-validator-v3 lib/locking-selftest-wlock.h
--- devel/lib/locking-selftest-wlock.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/locking-selftest-wlock.h	2006-06-06 13:25:54.000000000 -0700
@@ -3,3 +3,12 @@
 
 #undef UNLOCK
 #define UNLOCK		WU
+
+#undef RLOCK
+#define RLOCK		RL
+
+#undef WLOCK
+#define WLOCK		WL
+
+#undef INIT
+#define INIT		RWI
diff -puN lib/locking-selftest-wsem.h~lock-validator-v3 lib/locking-selftest-wsem.h
--- devel/lib/locking-selftest-wsem.h~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/locking-selftest-wsem.h	2006-06-06 13:25:54.000000000 -0700
@@ -3,3 +3,12 @@
 
 #undef UNLOCK
 #define UNLOCK		WSU
+
+#undef RLOCK
+#define RLOCK		RSL
+
+#undef WLOCK
+#define WLOCK		WSL
+
+#undef INIT
+#define INIT		RWSI
diff -puN lib/rwsem.c~lock-validator-v3 lib/rwsem.c
--- devel/lib/rwsem.c~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/rwsem.c	2006-06-06 13:25:54.000000000 -0700
@@ -14,15 +14,19 @@
 void __init_rwsem(struct rw_semaphore *sem, const char *name,
 		  struct lockdep_type_key *key)
 {
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
 	sem->count = RWSEM_UNLOCKED_VALUE;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 #if RWSEM_DEBUG
 	sem->debug = 0;
 #endif
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
-	lockdep_init_map(&sem->dep_map, name, key);
-#endif
 }
 
 EXPORT_SYMBOL(__init_rwsem);
diff -puN lib/rwsem-spinlock.c~lock-validator-v3 lib/rwsem-spinlock.c
--- devel/lib/rwsem-spinlock.c~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/rwsem-spinlock.c	2006-06-06 13:25:54.000000000 -0700
@@ -33,15 +33,19 @@ void rwsemtrace(struct rw_semaphore *sem
 void __init_rwsem(struct rw_semaphore *sem, const char *name,
 		  struct lockdep_type_key *key)
 {
+#ifdef CONFIG_DEBUG_RWSEM_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
 #if RWSEM_DEBUG
 	sem->debug = 0;
 #endif
-#ifdef CONFIG_PROVE_RWSEM_LOCKING
-	lockdep_init_map(&sem->dep_map, name, key);
-#endif
 }
 
 /*
diff -puN lib/spinlock_debug.c~lock-validator-v3 lib/spinlock_debug.c
--- devel/lib/spinlock_debug.c~lock-validator-v3	2006-06-06 13:25:54.000000000 -0700
+++ devel-akpm/lib/spinlock_debug.c	2006-06-06 13:25:54.000000000 -0700
@@ -13,6 +13,42 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 
+void __spin_lock_init(spinlock_t *lock, const char *name,
+		      struct lockdep_type_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key);
+#endif
+	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	lock->magic = SPINLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__spin_lock_init);
+
+void __rwlock_init(rwlock_t *lock, const char *name,
+		   struct lockdep_type_key *key)
+{
+#ifdef CONFIG_DEBUG_RWLOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key);
+#endif
+	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
+	lock->magic = RWLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+
+EXPORT_SYMBOL(__rwlock_init);
+
 static void spin_bug(spinlock_t *lock, const char *msg)
 {
 	struct task_struct *owner = NULL;
_

Patches currently in -mm which might be from mingo@xxxxxxx are

git-acpi.patch
fix-drivers-mfd-ucb1x00-corec-irq-probing-bug.patch
ieee1394-semaphore-to-mutex-conversion.patch
git-infiniband.patch
git-netdev-all.patch
lock-validator-fix-ns83820c-irq-flags-bug.patch
lock-validator-fix-ns83820c-irq-flags-bug-part.patch
lock-validator-fix-ns83820c-irq-flags-part-3.patch
ipw2200-locking-fix.patch
revert-gregkh-pci-pci-test-that-drivers-properly-call-pci_set_master.patch
fall-back-to-old-style-call-trace-if-no-unwinding.patch
allow-unwinder-to-build-without-module-support.patch
lock-validator-lockdep-small-xfs-init_rwsem-cleanup.patch
swapless-pm-add-r-w-migration-entries.patch
mm-slabc-fix-early-init-assumption.patch
i386-break-out-of-recursion-in-stackframe-walk.patch
x86-re-enable-generic-numa.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-tidy.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-arch_vma_name-fix.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-vs-x86_64-mm-reliable-stack-trace-support-i386.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-vs-x86_64-mm-reliable-stack-trace-support-i386-2.patch
powerpc-vdso-updates.patch
work-around-ppc64-bootup-bug-by-making-mutex-debugging-save-restore-irqs.patch
kernel-kernel-cpuc-to-mutexes.patch
cond-resched-might-sleep-fix.patch
define-__raw_get_cpu_var-and-use-it.patch
ide-cd-end-of-media-error-fix.patch
spin-rwlock-init-cleanups.patch
inotify-split-kernel-api-from-userspace-support.patch
lock-validator-introduce-warn_on_oncecond.patch
lock-validator-introduce-warn_on_oncecond-speedup.patch
emu10k1-mark-midi_spinlock-as-used.patch
epoll-use-unlocked-wqueue-operations.patch
time-clocksource-infrastructure.patch
sched-fix-smt-nice-lock-contention-and-optimization.patch
sched-fix-smt-nice-lock-contention-and-optimization-tidy.patch
sched-comment-bitmap-size-accounting.patch
sched-fix-interactive-ceiling-code.patch
sched-implement-smpnice.patch
sched-protect-calculation-of-max_pull-from-integer-wrap.patch
sched-store-weighted-load-on-up.patch
sched-add-discrete-weighted-cpu-load-function.patch
sched-prevent-high-load-weight-tasks-suppressing-balancing.patch
sched-improve-stability-of-smpnice-load-balancing.patch
sched-improve-smpnice-load-balancing-when-load-per-task.patch
smpnice-dont-consider-sched-groups-which-are-lightly-loaded-for-balancing.patch
smpnice-dont-consider-sched-groups-which-are-lightly-loaded-for-balancing-fix.patch
sched-modify-move_tasks-to-improve-load-balancing-outcomes.patch
sched-avoid-unnecessarily-moving-highest-priority-task-move_tasks.patch
sched-avoid-unnecessarily-moving-highest-priority-task-move_tasks-fix-2.patch
sched_domain-handle-kmalloc-failure.patch
sched_domain-handle-kmalloc-failure-fix.patch
sched_domain-dont-use-gfp_atomic.patch
sched_domain-use-kmalloc_node.patch
sched_domain-allocate-sched_group-structures-dynamically.patch
sched-add-above-background-load-function.patch
mm-implement-swap-prefetching-fix.patch
pi-futex-futex-code-cleanups.patch
pi-futex-robust-futex-docs-fix.patch
pi-futex-introduce-debug_check_no_locks_freed.patch
pi-futex-introduce-warn_on_smp.patch
pi-futex-add-plist-implementation.patch
pi-futex-scheduler-support-for-pi.patch
pi-futex-rt-mutex-core.patch
pi-futex-rt-mutex-docs.patch
pi-futex-rt-mutex-docs-update.patch
pi-futex-rt-mutex-debug.patch
pi-futex-rt-mutex-tester.patch
pi-futex-rt-mutex-futex-api.patch
pi-futex-futex_lock_pi-futex_unlock_pi-support.patch
futex_requeue-optimization.patch
fix-ide-deadlock-in-error-reporting-code.patch
genirq-rename-desc-handler-to-desc-chip.patch
genirq-rename-desc-handler-to-desc-chip-power-fix.patch
genirq-rename-desc-handler-to-desc-chip-ia64-fix.patch
genirq-rename-desc-handler-to-desc-chip-ia64-fix-2.patch
genirq-sem2mutex-probe_sem-probing_active.patch
genirq-cleanup-merge-irq_affinity-into-irq_desc.patch
genirq-cleanup-remove-irq_descp.patch
genirq-cleanup-remove-irq_descp-fix.patch
genirq-cleanup-remove-fastcall.patch
genirq-cleanup-misc-code-cleanups.patch
genirq-cleanup-reduce-irq_desc_t-use-mark-it-obsolete.patch
genirq-cleanup-include-linux-irqh.patch
genirq-cleanup-merge-irq_dir-smp_affinity_entry-into-irq_desc.patch
genirq-cleanup-merge-pending_irq_cpumask-into-irq_desc.patch
genirq-cleanup-turn-arch_has_irq_per_cpu-into-config_irq_per_cpu.patch
genirq-debug-better-debug-printout-in-enable_irq.patch
genirq-add-retrigger-irq-op-to-consolidate-hw_irq_resend.patch
genirq-doc-comment-include-linux-irqh-structures.patch
genirq-doc-handle_irq_event-and-__do_irq-comments.patch
genirq-cleanup-no_irq_type-cleanups.patch
genirq-doc-add-design-documentation.patch
genirq-add-genirq-sw-irq-retrigger.patch
genirq-add-irq_noprobe-support.patch
genirq-add-irq_norequest-support.patch
genirq-add-irq_noautoen-support.patch
genirq-update-copyrights.patch
genirq-core.patch
genirq-msi-fixes-2.patch
genirq-add-irq-chip-support.patch
genirq-add-irq-chip-support-fix.patch
genirq-add-handle_bad_irq.patch
genirq-add-irq-wake-power-management-support.patch
genirq-add-sa_trigger-support.patch
genirq-cleanup-no_irq_type-no_irq_chip-rename.patch
genirq-convert-the-x86_64-architecture-to-irq-chips.patch
genirq-convert-the-i386-architecture-to-irq-chips.patch
genirq-convert-the-i386-architecture-to-irq-chips-fix-2.patch
genirq-more-verbose-debugging-on-unexpected-irq-vectors.patch
genirq-add-chip-eoi-fastack-fasteoi.patch
genirq-add-chip-eoi-fastack-fasteoi-fix.patch
lock-validator-sparc64-sparc-m68k-alpha-cris-build-fix.patch
lock-validator-floppyc-irq-release-fix.patch
lock-validator-floppyc-irq-release-fix-fix.patch
lock-validator-floppyc-irq-release-fix-fix-fix.patch
lock-validator-forcedethc-fix.patch
lock-validator-mutex-section-binutils-workaround.patch
lock-validator-add-__module_address-method.patch
lock-validator-better-lock-debugging.patch
lock-validator-locking-api-self-tests.patch
lock-validator-locking-api-self-tests-self-test-fix.patch
lock-validator-locking-init-debugging-improvement.patch
lock-validator-beautify-x86_64-stacktraces.patch
lock-validator-beautify-x86_64-stacktraces-fix.patch
lock-validator-beautify-x86_64-stacktraces-fix-2.patch
lock-validator-beautify-x86_64-stacktraces-fix-3.patch
lock-validator-beautify-x86_64-stacktraces-fix-4.patch
lock-validator-x86_64-document-stack-frame-internals.patch
lock-validator-stacktrace.patch
lock-validator-stacktrace-build-fix.patch
lock-validator-stacktrace-warning-fix.patch
lock-validator-stacktrace-fix-on-x86_64.patch
lock-validator-fown-locking-workaround.patch
lock-validator-sk_callback_lock-workaround.patch
lock-validator-irqtrace-core.patch
lock-validator-irqtrace-core-powerpc-fix-1.patch
lock-validator-irqtrace-core-non-x86-fix.patch
lock-validator-irqtrace-core-non-x86-fix-2.patch
lock-validator-irqtrace-core-non-x86-fix-3.patch
lock-validator-irqtrace-entrys-fix.patch
lock-validator-irqtrace-core-remove-softirqc-warn_on.patch
lock-validator-irqtrace-cleanup-include-asm-i386-irqflagsh.patch
lock-validator-irqtrace-cleanup-include-asm-x86_64-irqflagsh.patch
lock-validator-x86_64-irqflags-trace-entrys-fix.patch
lock-validator-lockdep-add-local_irq_enable_in_hardirq-api.patch
lock-validator-add-per_cpu_offset.patch
lock-validator-add-per_cpu_offset-fix.patch
lock-validator-core.patch
lock-validator-core-early_boot_irqs_-build-fix.patch
lock-validator-core-early_boot_irqs_-build-fix-sparc64-sparc-m68k-alpha-cris-irqtrace-build-fix.patch
lock-validator-core-fix-compiler-warning.patch
lock-validator-core-add-config_debug_non_nested_unlocks.patch
lock-validator-procfs.patch
lock-validator-core-multichar-fix.patch
lock-validator-core-count_matching_names-fix.patch
lock-validator-design-docs.patch
lock-validator-prove-rwsem-locking-correctness.patch
lock-validator-prove-rwsem-locking-correctness-fix.patch
lock-validator-prove-rwsem-locking-correctness-powerpc-fix.patch
lock-validator-prove-spinlock-rwlock-locking-correctness.patch
lock-validator-prove-mutex-locking-correctness.patch
lock-validator-prove-mutex-locking-correctness-fix-null-type-name-bug.patch
better-lock-debugging-remove-mutex-deadlock-checking-code.patch
lock-validator-print-all-lock-types-on-sysrq-d.patch
lock-validator-x86_64-early-init.patch
lock-validator-smp-alternatives-workaround.patch
lock-validator-do-not-recurse-in-printk.patch
lock-validator-disable-nmi-watchdog-if-config_lockdep.patch
lock-validator-disable-nmi-watchdog-if-config_lockdep-i386.patch
lock-validator-disable-nmi-watchdog-if-config_lockdep-x86_64.patch
lock-validator-special-locking-bdev.patch
lock-validator-special-locking-bdev-fix.patch
lock-validator-special-locking-direct-io.patch
lock-validator-special-locking-serial.patch
lock-validator-special-locking-serial-fix.patch
lock-validator-special-locking-dcache.patch
lock-validator-special-locking-i_mutex.patch
lock-validator-special-locking-s_lock.patch
lock-validator-special-locking-futex.patch
lock-validator-special-locking-genirq.patch
lock-validator-special-locking-genirq-lock-validator-early_init_irq_lock_type-build-fix.patch
lock-validator-special-locking-completions.patch
lock-validator-special-locking-waitqueues.patch
lock-validator-special-locking-mm.patch
lock-validator-special-locking-serio.patch
lock-validator-special-locking-slab.patch
lock-validator-special-locking-skb_queue_head_init.patch
lock-validator-special-locking-net-ipv4-igmpcpatch.patch
lock-validator-special-locking-net-ipv4-igmpc-2.patch
lock-validator-special-locking-timerc.patch
lock-validator-special-locking-schedc.patch
lock-validator-special-locking-sctp.patch
lock-validator-special-locking-hrtimerc.patch
lock-validator-special-locking-sock_lock_init.patch
lock-validator-special-locking-af_unix.patch
lock-validator-special-locking-bh_lock_sock.patch
lock-validator-special-locking-mmap_sem.patch
lock-validator-special-locking-sb-s_umount.patch
lock-validator-special-locking-reiser4-false-positive.patch
lock-validator-special-locking-sb-s_umount-fix.patch
lock-validator-special-locking-sb-s_umount-2.patch
lock-validator-special-locking-sb-s_umount-2-fix.patch
lockdep-annotate-rpc_populate-for.patch
lock-validator-special-locking-jbd.patch
lock-validator-special-locking-posix-timers.patch
lock-validator-special-locking-sch_genericc.patch
lock-validator-special-locking-xfrm.patch
lockdep-add-i_mutex-ordering-annotations-to-the-sunrpc.patch
lockdep-add-parent-child-annotations-to-usbfs.patch
lock-validator-special-locking-sound-core-seq-seq_portsc.patch
lock-validator-special-locking-sound-core-seq-seq_devicec.patch
lock-validator-special-locking-sound-core-seq-seq_devicec-fix.patch
lock-validator-fix-rt_hash_lock_sz.patch
lock-validator-introduce-irq__lockdep.patch
locking-validator-special-rule-8390c-disable_irq.patch
locking-validator-special-rule-3c59xc-disable_irq.patch
lock-validator-enable-lock-validator-in-kconfig.patch
lock-validator-enable-lock-validator-in-kconfig-require-trace_irqflags_support.patch
lock-validator-enable-lock-validator-in-kconfig-not-yet.patch
lock-validator-enable-lock-validator-in-kconfig-add-config_debug_non_nested_unlocks-kconfig.patch
lockdep-one-stacktrace-column-if-config_lockdep=y.patch
i386-remove-multi-entry-backtraces.patch
lockdep-further-improve-stacktrace-output.patch
lock-validator-irqtrace-support-non-x86-architectures.patch
lock-validator-disable-oprofile-if-lockdep=y.patch
lock-validator-select-kallsyms_all.patch
lock-validator-v3.patch
lock-validator-special-locking-kgdb.patch
detect-atomic-counter-underflows.patch
debug-shared-irqs.patch
make-frame_pointer-default=y.patch
mutex-subsystem-synchro-test-module.patch
vdso-print-fatal-signals.patch
vdso-improve-print_fatal_signals-support-by-adding-memory-maps.patch

