- pi-futex-v2.patch removed from -mm tree

The patch titled

     PI-futex: -V2

has been removed from the -mm tree.  Its filename is

     pi-futex-v2.patch

This patch was probably dropped from -mm because
it has now been merged into a subsystem tree or
into Linus's tree, or because it was folded into
its parent patch in the -mm tree.

------------------------------------------------------
Subject: PI-futex: -V2
From: Ingo Molnar <mingo@xxxxxxx>


clean up the code as per Andrew's suggestions:

 - '# ifdef' => '#ifdef'
 - fastcall removal
 - lots of macro -> C function conversions (illustrated below)
 - move rtmutex_internals.h to kernel/rtmutex_common.h
 - uninline two larger functions
 - remove noinline
 - explain locking better
 - set_task_state(current, state) => set_current_state(state)

 - fix the PI code (Esben Nielsen)
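
As an illustration of the macro -> C function conversions, here is
rt_mutex_init_task(): the do/while(0) macro removed from rtmutex.h
becomes a static inline in kernel/fork.c (both versions appear
verbatim in the hunks below). The inline gains argument type checking
and collapses the CONFIG_RT_MUTEXES / !CONFIG_RT_MUTEXES macro pair
into a single #ifdef inside the function body:

	static inline void rt_mutex_init_task(struct task_struct *p)
	{
	#ifdef CONFIG_RT_MUTEXES
		spin_lock_init(&p->pi_lock);
		plist_head_init(&p->pi_waiters);
		p->pi_blocked_on = NULL;
		p->pi_locked_by = NULL;
		INIT_LIST_HEAD(&p->pi_lock_chain);
	#endif
	}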

Signed-off-by: Ingo Molnar <mingo@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 include/linux/rtmutex.h          |   29 +---
 include/linux/rtmutex_internal.h |  187 -----------------------------
 kernel/fork.c                    |   11 +
 kernel/futex.c                   |    3 
 kernel/rtmutex-debug.c           |    2 
 kernel/rtmutex-debug.h           |    2 
 kernel/rtmutex.c                 |  116 +++++++++++++++--
 kernel/rtmutex_common.h          |  123 +++++++++++++++++++
 kernel/sched.c                   |    2 
 9 files changed, 247 insertions(+), 228 deletions(-)

diff -puN include/linux/rtmutex.h~pi-futex-v2 include/linux/rtmutex.h
--- devel/include/linux/rtmutex.h~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/include/linux/rtmutex.h	2006-05-19 16:01:33.000000000 -0700
@@ -27,14 +27,14 @@ struct rt_mutex {
 	spinlock_t		wait_lock;
 	struct plist_head	wait_list;
 	struct task_struct	*owner;
-# ifdef CONFIG_DEBUG_RT_MUTEXES
+#ifdef CONFIG_DEBUG_RT_MUTEXES
 	int			save_state;
 	struct list_head	held_list;
 	unsigned long		acquire_ip;
 	const char 		*name, *file;
 	int			line;
 	void			*magic;
-# endif
+#endif
 };
 
 struct rt_mutex_waiter;
@@ -79,40 +79,31 @@ struct hrtimer_sleeper;
  *
  * Returns 1 if the mutex is locked, 0 if unlocked.
  */
-static inline int fastcall rt_mutex_is_locked(struct rt_mutex *lock)
+static inline int rt_mutex_is_locked(struct rt_mutex *lock)
 {
 	return lock->owner != NULL;
 }
 
-extern void fastcall __rt_mutex_init(struct rt_mutex *lock, const char *name);
-extern void fastcall rt_mutex_destroy(struct rt_mutex *lock);
+extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void rt_mutex_destroy(struct rt_mutex *lock);
 
-extern void fastcall rt_mutex_lock(struct rt_mutex *lock);
-extern int fastcall rt_mutex_lock_interruptible(struct rt_mutex *lock,
+extern void rt_mutex_lock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
 						int detect_deadlock);
-extern int fastcall rt_mutex_timed_lock(struct rt_mutex *lock,
+extern int rt_mutex_timed_lock(struct rt_mutex *lock,
 					struct hrtimer_sleeper *timeout,
 					int detect_deadlock);
 
-extern int fastcall rt_mutex_trylock(struct rt_mutex *lock);
+extern int rt_mutex_trylock(struct rt_mutex *lock);
 
-extern void fastcall rt_mutex_unlock(struct rt_mutex *lock);
+extern void rt_mutex_unlock(struct rt_mutex *lock);
 
 #ifdef CONFIG_RT_MUTEXES
-# define rt_mutex_init_task(p)						\
- do {									\
-	spin_lock_init(&p->pi_lock);					\
-	plist_head_init(&p->pi_waiters);				\
-	p->pi_blocked_on = NULL;					\
-	p->pi_locked_by = NULL;						\
-	INIT_LIST_HEAD(&p->pi_lock_chain);				\
- } while (0)
 # define INIT_RT_MUTEXES(tsk)						\
 	.pi_waiters	= PLIST_HEAD_INIT(tsk.pi_waiters),		\
 	.pi_lock	= SPIN_LOCK_UNLOCKED,				\
 	.pi_lock_chain	= LIST_HEAD_INIT(tsk.pi_lock_chain),
 #else
-# define rt_mutex_init_task(p)		do { } while (0)
 # define INIT_RT_MUTEXES(tsk)
 #endif
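
(For context, a minimal and purely illustrative user of the
de-fastcall'd API declared above; "my_lock" is a hypothetical name,
not something from this patch:

	static struct rt_mutex my_lock;

	__rt_mutex_init(&my_lock, "my_lock");
	rt_mutex_lock(&my_lock);
	/* ... critical section ... */
	rt_mutex_unlock(&my_lock);

With fastcall gone, these are plain C function calls with the same
calling convention on every architecture.)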
 
diff -L include/linux/rtmutex_internal.h -puN include/linux/rtmutex_internal.h~pi-futex-v2 /dev/null
--- devel/include/linux/rtmutex_internal.h
+++ /dev/null	2006-05-19 15:26:20.261540500 -0700
@@ -1,187 +0,0 @@
-/*
- * RT Mutexes: blocking mutual exclusion locks with PI support
- *
- * started by Ingo Molnar and Thomas Gleixner:
- *
- *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@xxxxxxxxxx>
- *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@xxxxxxxxxxx>
- *
- * This file contains the private data structure and API definitions.
- */
-
-#ifndef __LINUX_RT_MUTEX_INTERNAL_H
-#define __LINUX_RT_MUTEX_INTERNAL_H
-
-#include <linux/rtmutex.h>
-
-/*
- * The rtmutex in kernel tester is independent of rtmutex debugging. We
- * call schedule_rt_mutex_test() instead of schedule() for the tasks which
- * belong to the tester. That way we can delay the wakeup path of those
- * threads to provoke lock stealing and testing of complex boosting scenarios.
- */
-#ifdef CONFIG_RT_MUTEX_TESTER
-
-extern void schedule_rt_mutex_test(struct rt_mutex *lock);
-
-#define schedule_rt_mutex(_lock)				\
-  do {								\
-	if (!(current->flags & PF_MUTEX_TESTER))		\
-		schedule();					\
-	else							\
-		schedule_rt_mutex_test(_lock);			\
-  } while (0)
-
-#else
-# define schedule_rt_mutex(_lock)			schedule()
-#endif
-
-/*
- * This is the control structure for tasks blocked on a rt_mutex,
- * which is allocated on the kernel stack of the blocked task.
- *
- * @list_entry:		pi node to enqueue into the mutex waiters list
- * @pi_list_entry:	pi node to enqueue into the mutex owner waiters list
- * @task:		task reference to the blocked task
- */
-struct rt_mutex_waiter {
-	struct plist_node	list_entry;
-	struct plist_node	pi_list_entry;
-	struct task_struct	*task;
-	struct rt_mutex		*lock;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
-	unsigned long		ip;
-	pid_t			deadlock_task_pid;
-	struct rt_mutex		*deadlock_lock;
-#endif
-};
-
-/*
- * Plist wrapper macros
- */
-#define rt_mutex_has_waiters(lock)	(!plist_head_empty(&lock->wait_list))
-
-#define rt_mutex_top_waiter(lock) 	\
-({ struct rt_mutex_waiter *__w = plist_first_entry(&lock->wait_list, \
-					struct rt_mutex_waiter, list_entry); \
-	BUG_ON(__w->lock != lock);	\
-	__w;				\
-})
-
-#define task_has_pi_waiters(task)	(!plist_head_empty(&task->pi_waiters))
-
-#define task_top_pi_waiter(task) 	\
-	plist_first_entry(&task->pi_waiters, struct rt_mutex_waiter, pi_list_entry)
-
-/*
- * lock->owner state tracking:
- *
- * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
- * are used to keep track of the "owner is pending" and "lock has
- * waiters" state.
- *
- * owner	bit1	bit0
- * NULL		0	0	lock is free (fast acquire possible)
- * NULL		0	1	invalid state
- * NULL		1	0	invalid state
- * NULL		1	1	invalid state
- * taskpointer	0	0	lock is held (fast release possible)
- * taskpointer	0	1	task is pending owner
- * taskpointer	1	0	lock is held and has waiters
- * taskpointer	1	1	task is pending owner and lock has more waiters
- *
- * Pending ownership is assigned to the top (highest priority)
- * waiter of the lock, when the lock is released. The thread is woken
- * up and can now take the lock. Until the lock is taken (bit 0
- * cleared) a competing higher priority thread can steal the lock
- * which puts the woken up thread back on the waiters list.
- *
- * The fast atomic compare exchange based acquire and release is only
- * possible when bit 0 and 1 of lock->owner are 0.
- */
-#define RT_MUTEX_OWNER_PENDING	1UL
-#define RT_MUTEX_HAS_WAITERS	2UL
-#define RT_MUTEX_OWNER_MASKALL	3UL
-
-#define rt_mutex_owner(lock)						\
-({									\
-	typecheck(struct rt_mutex *,(lock));				\
- 	((struct task_struct *)((unsigned long)((lock)->owner) & ~RT_MUTEX_OWNER_MASKALL)); \
-})
-
-#define rt_mutex_real_owner(lock)					\
-({									\
-	typecheck(struct rt_mutex *,(lock));				\
- 	((struct task_struct *)((unsigned long)((lock)->owner) & ~RT_MUTEX_HAS_WAITERS)); \
-})
-
-#define rt_mutex_owner_pending(lock)					\
-({									\
-	typecheck(struct rt_mutex *,(lock));				\
-	((unsigned long)((lock)->owner) & RT_MUTEX_OWNER_PENDING);	\
-})
-
-static inline void rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
-				      unsigned long msk)
-{
-	unsigned long val = ((unsigned long) owner) | msk;
-
-	if (rt_mutex_has_waiters(lock))
-		val |= RT_MUTEX_HAS_WAITERS;
-
-	lock->owner = (struct task_struct *)(val);
-}
-
-static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	unsigned long owner;
-
-	owner = ((unsigned long) lock->owner) & ~RT_MUTEX_HAS_WAITERS;
-	lock->owner = (struct task_struct *)(owner);
-}
-
-static inline void fixup_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	if (!rt_mutex_has_waiters(lock))
-		clear_rt_mutex_waiters(lock);
-}
-
-/*
- * We can speed up the acquire/release, if the architecture
- * supports cmpxchg and if there's no debugging state to be set up
- */
-#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
-
-# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
-
-static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	unsigned long owner, *p = (unsigned long *) &lock->owner;
-
-	do {
-		owner = *p;
-	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
-}
-
-#else
-
-# define rt_mutex_cmpxchg(l,c,n)	(0)
-
-static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
-{
-	unsigned long owner = ((unsigned long) lock->owner)| RT_MUTEX_HAS_WAITERS;
-
-	lock->owner = (struct task_struct *) owner;
-}
-
-#endif
-
-/*
- * PI-futex support (proxy locking functions, etc.):
- */
-extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
-extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
-				       struct task_struct *proxy_owner);
-extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
-				  struct task_struct *proxy_owner);
-#endif
diff -puN kernel/fork.c~pi-futex-v2 kernel/fork.c
--- devel/kernel/fork.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/fork.c	2006-05-19 16:01:33.000000000 -0700
@@ -914,6 +914,17 @@ asmlinkage long sys_set_tid_address(int 
 	return current->pid;
 }
 
+static inline void rt_mutex_init_task(struct task_struct *p)
+{
+#ifdef CONFIG_RT_MUTEXES
+	spin_lock_init(&p->pi_lock);
+	plist_head_init(&p->pi_waiters);
+	p->pi_blocked_on = NULL;
+	p->pi_locked_by = NULL;
+	INIT_LIST_HEAD(&p->pi_lock_chain);
+#endif
+}
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
diff -puN kernel/futex.c~pi-futex-v2 kernel/futex.c
--- devel/kernel/futex.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/futex.c	2006-05-19 16:01:33.000000000 -0700
@@ -48,9 +48,10 @@
 #include <linux/pagemap.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
-#include <linux/rtmutex_internal.h>
 #include <asm/futex.h>
 
+#include "rtmutex_common.h"
+
 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
 
 /*
diff -puN kernel/rtmutex.c~pi-futex-v2 kernel/rtmutex.c
--- devel/kernel/rtmutex.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/rtmutex.c	2006-05-19 16:01:33.000000000 -0700
@@ -12,7 +12,7 @@
 #include <linux/sched.h>
 #include <linux/timer.h>
 
-#include <linux/rtmutex_internal.h>
+#include "rtmutex_common.h"
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
@@ -21,6 +21,80 @@
 #endif
 
 /*
+ * lock->owner state tracking:
+ *
+ * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
+ * are used to keep track of the "owner is pending" and "lock has
+ * waiters" state.
+ *
+ * owner	bit1	bit0
+ * NULL		0	0	lock is free (fast acquire possible)
+ * NULL		0	1	invalid state
+ * NULL		1	0	invalid state
+ * NULL		1	1	invalid state
+ * taskpointer	0	0	lock is held (fast release possible)
+ * taskpointer	0	1	task is pending owner
+ * taskpointer	1	0	lock is held and has waiters
+ * taskpointer	1	1	task is pending owner and lock has more waiters
+ *
+ * Pending ownership is assigned to the top (highest priority)
+ * waiter of the lock, when the lock is released. The thread is woken
+ * up and can now take the lock. Until the lock is taken (bit 0
+ * cleared) a competing higher priority thread can steal the lock
+ * which puts the woken up thread back on the waiters list.
+ *
+ * The fast atomic compare exchange based acquire and release is only
+ * possible when bit 0 and 1 of lock->owner are 0.
+ */
+
+static void
+rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
+		   unsigned long mask)
+{
+	unsigned long val = (unsigned long)owner | mask;
+
+	if (rt_mutex_has_waiters(lock))
+		val |= RT_MUTEX_HAS_WAITERS;
+
+	lock->owner = (struct task_struct *)val;
+}
+
+static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	lock->owner = (struct task_struct *)
+			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+}
+
+static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	if (!rt_mutex_has_waiters(lock))
+		clear_rt_mutex_waiters(lock);
+}
+
+/*
+ * We can speed up the acquire/release, if the architecture
+ * supports cmpxchg and if there's no debugging state to be set up
+ */
+#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
+# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	do {
+		owner = *p;
+	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+}
+#else
+# define rt_mutex_cmpxchg(l,c,n)	(0)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+	lock->owner = (struct task_struct *)
+			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+}
+#endif
+
+/*
  * Calculate task priority from the waiter list priority
  *
  * Return task->normal_prio when the waiter list is empty or when
@@ -87,6 +161,9 @@ static DEFINE_SPINLOCK(pi_conflicts_lock
  * If 'try' is set, we have to backout if we hit a owner who is
  * running its own pi chain operation. We go back and take the slow
  * path via the pi_conflicts_lock.
+ *
+ * We put all held locks into a list, via ->pi_lock_chain, and walk
+ * this list at unlock_pi_chain() time.
  */
 static int lock_pi_chain(struct rt_mutex *act_lock,
 			 struct rt_mutex_waiter *waiter,
@@ -222,10 +299,15 @@ static void adjust_pi_chain(struct rt_mu
 			plist_del(&top_waiter->pi_list_entry,
 				  &owner->pi_waiters);
 
-		if (waiter && waiter == rt_mutex_top_waiter(lock)) {
+		if (waiter)
 			waiter->pi_list_entry.prio = waiter->task->prio;
-			plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+
+		if (rt_mutex_has_waiters(lock)) {
+			top_waiter = rt_mutex_top_waiter(lock);
+			plist_add(&top_waiter->pi_list_entry,
+				  &owner->pi_waiters);
 		}
+
 		__rt_mutex_adjust_prio(owner);
 
 		waiter = owner->pi_blocked_on;
@@ -605,7 +687,7 @@ static int remove_waiter(struct rt_mutex
 /*
  * Slow path lock function:
  */
-static int fastcall noinline __sched
+static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
 		  int detect_deadlock __IP_DECL__)
@@ -711,7 +793,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
 /*
  * Slow path try-lock function:
  */
-static inline int fastcall
+static inline int
 rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
 {
 	unsigned long flags;
@@ -739,7 +821,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lo
 /*
  * Slow path to release a rt-mutex:
  */
-static void fastcall noinline __sched
+static void __sched
 rt_mutex_slowunlock(struct rt_mutex *lock)
 {
 	unsigned long flags;
@@ -773,7 +855,7 @@ rt_mutex_slowunlock(struct rt_mutex *loc
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
 		  int detect_deadlock,
-		  int fastcall (*slowfn)(struct rt_mutex *lock, int state,
+		  int (*slowfn)(struct rt_mutex *lock, int state,
 					 struct hrtimer_sleeper *timeout,
 					 int detect_deadlock __IP_DECL__))
 {
@@ -787,7 +869,7 @@ rt_mutex_fastlock(struct rt_mutex *lock,
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 			struct hrtimer_sleeper *timeout, int detect_deadlock,
-			int fastcall (*slowfn)(struct rt_mutex *lock, int state,
+			int (*slowfn)(struct rt_mutex *lock, int state,
 					       struct hrtimer_sleeper *timeout,
 					       int detect_deadlock __IP_DECL__))
 {
@@ -800,7 +882,7 @@ rt_mutex_timed_fastlock(struct rt_mutex 
 
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int fastcall (*slowfn)(struct rt_mutex *lock __IP_DECL__))
+		     int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
@@ -811,7 +893,7 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
 
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
-		    void fastcall (*slowfn)(struct rt_mutex *lock))
+		    void (*slowfn)(struct rt_mutex *lock))
 {
 	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
 		rt_mutex_deadlock_account_unlock(current);
@@ -824,7 +906,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
  *
  * @lock: the rt_mutex to be locked
  */
-void fastcall __sched rt_mutex_lock(struct rt_mutex *lock)
+void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
 	might_sleep();
 
@@ -843,7 +925,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  * -EINTR 	when interrupted by a signal
  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
  */
-int fastcall __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
 						 int detect_deadlock)
 {
 	might_sleep();
@@ -868,7 +950,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interrup
  * -ETIMEOUT	when the timeout expired
  * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
  */
-int fastcall
+int
 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
 		    int detect_deadlock)
 {
@@ -887,7 +969,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  *
  * Returns 1 on success and 0 on contention
  */
-int fastcall __sched rt_mutex_trylock(struct rt_mutex *lock)
+int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
 	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 }
@@ -898,7 +980,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
  *
  * @lock: the rt_mutex to be unlocked
  */
-void fastcall __sched rt_mutex_unlock(struct rt_mutex *lock)
+void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
 }
@@ -912,7 +994,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
  * use of the mutex is forbidden. The mutex must not be locked when
  * this function is called.
  */
-void fastcall rt_mutex_destroy(struct rt_mutex *lock)
+void rt_mutex_destroy(struct rt_mutex *lock)
 {
 	WARN_ON(rt_mutex_is_locked(lock));
 #ifdef CONFIG_DEBUG_RT_MUTEXES
@@ -931,7 +1013,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
  *
  * Initializing of a locked rt lock is not allowed
  */
-void fastcall __rt_mutex_init(struct rt_mutex *lock, const char *name)
+void __rt_mutex_init(struct rt_mutex *lock, const char *name)
 {
 	lock->owner = NULL;
 	spin_lock_init(&lock->wait_lock);
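
(The fast path set up by rt_mutex_fastlock()/rt_mutex_fastunlock()
above reduces to a single cmpxchg on lock->owner. A sketch of the
uncontended round trip, assuming __HAVE_ARCH_CMPXCHG is available and
CONFIG_DEBUG_RT_MUTEXES is off, so __IP_DECL__ expands to nothing:

	/* acquire: NULL -> current, both state bits clear */
	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
		rt_mutex_deadlock_account_lock(lock, current);
	else
		rt_mutex_slowlock(lock, TASK_UNINTERRUPTIBLE, NULL, 0);

	/* release: only valid while bits 0 and 1 are both clear */
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		rt_mutex_slowunlock(lock);

On architectures without cmpxchg, rt_mutex_cmpxchg() is defined to 0
and both sides always take the spinlock-protected slow path.)
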
diff -puN /dev/null kernel/rtmutex_common.h
--- /dev/null	2006-05-19 15:26:20.261540500 -0700
+++ devel-akpm/kernel/rtmutex_common.h	2006-05-19 16:01:33.000000000 -0700
@@ -0,0 +1,123 @@
+/*
+ * RT Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@xxxxxxxxxx>
+ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@xxxxxxxxxxx>
+ *
+ * This file contains the private data structure and API definitions.
+ */
+
+#ifndef __KERNEL_RTMUTEX_COMMON_H
+#define __KERNEL_RTMUTEX_COMMON_H
+
+#include <linux/rtmutex.h>
+
+/*
+ * The rtmutex in kernel tester is independent of rtmutex debugging. We
+ * call schedule_rt_mutex_test() instead of schedule() for the tasks which
+ * belong to the tester. That way we can delay the wakeup path of those
+ * threads to provoke lock stealing and testing of complex boosting scenarios.
+ */
+#ifdef CONFIG_RT_MUTEX_TESTER
+
+extern void schedule_rt_mutex_test(struct rt_mutex *lock);
+
+#define schedule_rt_mutex(_lock)				\
+  do {								\
+	if (!(current->flags & PF_MUTEX_TESTER))		\
+		schedule();					\
+	else							\
+		schedule_rt_mutex_test(_lock);			\
+  } while (0)
+
+#else
+# define schedule_rt_mutex(_lock)			schedule()
+#endif
+
+/*
+ * This is the control structure for tasks blocked on a rt_mutex,
+ * which is allocated on the kernel stack of the blocked task.
+ *
+ * @list_entry:		pi node to enqueue into the mutex waiters list
+ * @pi_list_entry:	pi node to enqueue into the mutex owner waiters list
+ * @task:		task reference to the blocked task
+ */
+struct rt_mutex_waiter {
+	struct plist_node	list_entry;
+	struct plist_node	pi_list_entry;
+	struct task_struct	*task;
+	struct rt_mutex		*lock;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+	unsigned long		ip;
+	pid_t			deadlock_task_pid;
+	struct rt_mutex		*deadlock_lock;
+#endif
+};
+
+/*
+ * Various helpers to access the waiters-plist:
+ */
+static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
+{
+	return !plist_head_empty(&lock->wait_list);
+}
+
+static inline struct rt_mutex_waiter *
+rt_mutex_top_waiter(struct rt_mutex *lock)
+{
+	struct rt_mutex_waiter *w;
+
+	w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
+			       list_entry);
+	BUG_ON(w->lock != lock);
+
+	return w;
+}
+
+static inline int task_has_pi_waiters(struct task_struct *p)
+{
+	return !plist_head_empty(&p->pi_waiters);
+}
+
+static inline struct rt_mutex_waiter *
+task_top_pi_waiter(struct task_struct *p)
+{
+	return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
+				  pi_list_entry);
+}
+
+/*
+ * lock->owner state tracking:
+ */
+#define RT_MUTEX_OWNER_PENDING	1UL
+#define RT_MUTEX_HAS_WAITERS	2UL
+#define RT_MUTEX_OWNER_MASKALL	3UL
+
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+{
+	return (struct task_struct *)
+		((unsigned long)((lock)->owner) & ~RT_MUTEX_OWNER_MASKALL);
+}
+
+static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
+{
+ 	return (struct task_struct *)
+		((unsigned long)((lock)->owner) & ~RT_MUTEX_HAS_WAITERS);
+}
+
+static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
+{
+	return ((unsigned long)((lock)->owner) & RT_MUTEX_OWNER_PENDING);
+}
+
+/*
+ * PI-futex support (proxy locking functions, etc.):
+ */
+extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+				       struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+				  struct task_struct *proxy_owner);
+#endif
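
(To make the lock->owner bit encoding described in kernel/rtmutex.c
above concrete, a small sketch, not part of the patch, of how the two
low bits are packed and unpacked with the helpers from this header:

	/* "lock is held and has waiters": taskpointer | bit1 */
	lock->owner = (struct task_struct *)
			((unsigned long)task | RT_MUTEX_HAS_WAITERS);

	rt_mutex_owner(lock);		/* masks bits 0 and 1: task */
	rt_mutex_owner_pending(lock);	/* tests bit 0: 0 here */
	rt_mutex_has_waiters(lock);	/* inspects the wait_list plist */

Since a task_struct is always at least 4-byte aligned, the two low
bits of the owner pointer are free to carry this state.)
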
diff -puN kernel/rtmutex-debug.c~pi-futex-v2 kernel/rtmutex-debug.c
--- devel/kernel/rtmutex-debug.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/rtmutex-debug.c	2006-05-19 16:01:33.000000000 -0700
@@ -27,7 +27,7 @@
 #include <linux/plist.h>
 #include <linux/fs.h>
 
-#include <linux/rtmutex_internal.h>
+#include "rtmutex_common.h"
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
diff -puN kernel/rtmutex-debug.h~pi-futex-v2 kernel/rtmutex-debug.h
--- devel/kernel/rtmutex-debug.h~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/rtmutex-debug.h	2006-05-19 16:01:33.000000000 -0700
@@ -9,8 +9,6 @@
  * This file contains macros used solely by rtmutex.c. Debug version.
  */
 
-#include <linux/rtmutex_internal.h>
-
 #define __IP_DECL__		, unsigned long ip
 #define __IP__			, ip
 #define __RET_IP__		, (unsigned long)__builtin_return_address(0)
diff -puN kernel/sched.c~pi-futex-v2 kernel/sched.c
--- devel/kernel/sched.c~pi-futex-v2	2006-05-19 16:01:33.000000000 -0700
+++ devel-akpm/kernel/sched.c	2006-05-19 16:01:33.000000000 -0700
@@ -3961,7 +3961,7 @@ static void __setscheduler(struct task_s
 	p->rt_priority = prio;
 
 	p->normal_prio = normal_prio(p);
-	/* we are holding p->pi_list already */
+	/* we are holding p->pi_lock already */
 	p->prio = rt_mutex_getprio(p);
 	/*
 	 * SCHED_BATCH tasks are treated as perpetual CPU hogs:
_

Patches currently in -mm which might be from mingo@xxxxxxx are

git-acpi.patch
fix-drivers-mfd-ucb1x00-corec-irq-probing-bug.patch
git-infiniband.patch
git-netdev-all.patch
fix-for-serial-uart-lockup.patch
swapless-pm-add-r-w-migration-entries-fix.patch
i386-break-out-of-recursion-in-stackframe-walk.patch
x86-re-enable-generic-numa.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-tidy.patch
vdso-randomize-the-i386-vdso-by-moving-it-into-a-vma-arch_vma_name-fix.patch
work-around-ppc64-bootup-bug-by-making-mutex-debugging-save-restore-irqs.patch
kernel-kernel-cpuc-to-mutexes.patch
cond-resched-might-sleep-fix.patch
define-__raw_get_cpu_var-and-use-it.patch
ide-cd-end-of-media-error-fix.patch
spin-rwlock-init-cleanups.patch
time-clocksource-infrastructure.patch
sched-comment-bitmap-size-accounting.patch
sched-fix-interactive-ceiling-code.patch
sched-implement-smpnice.patch
sched-protect-calculation-of-max_pull-from-integer-wrap.patch
sched-store-weighted-load-on-up.patch
sched-add-discrete-weighted-cpu-load-function.patch
sched-prevent-high-load-weight-tasks-suppressing-balancing.patch
sched-improve-stability-of-smpnice-load-balancing.patch
sched-improve-smpnice-load-balancing-when-load-per-task.patch
smpnice-dont-consider-sched-groups-which-are-lightly-loaded-for-balancing.patch
smpnice-dont-consider-sched-groups-which-are-lightly-loaded-for-balancing-fix.patch
sched-modify-move_tasks-to-improve-load-balancing-outcomes.patch
sched-avoid-unnecessarily-moving-highest-priority-task-move_tasks.patch
sched-avoid-unnecessarily-moving-highest-priority-task-move_tasks-fix-2.patch
sched_domain-handle-kmalloc-failure.patch
sched_domain-handle-kmalloc-failure-fix.patch
sched_domain-dont-use-gfp_atomic.patch
sched_domain-use-kmalloc_node.patch
sched_domain-allocate-sched_group-structures-dynamically.patch
sched-add-above-background-load-function.patch
mm-implement-swap-prefetching-fix.patch
pi-futex-v2.patch
pi-futex-v3.patch
pi-futex-patchset-v4.patch
pi-futex-patchset-v4-update.patch
pi-futex-patchset-v4-fix.patch
rtmutex-remove-buggy-bug_on-in-pi-boosting-code.patch
futex-pi-enforce-waiter-bit-when-owner-died-is-detected.patch
rtmutex-debug-printk-correct-task-information.patch
futex-pi-make-use-of-restart_block-when-interrupted.patch
document-futex-pi-design.patch
futex_requeue-optimization.patch
reiser4.patch
reiser4-spin-rwlock-init-cleanups.patch
genirq-rename-desc-handler-to-desc-chip.patch
genirq-rename-desc-handler-to-desc-chip-power-fix.patch
genirq-rename-desc-handler-to-desc-chip-ia64-fix.patch
genirq-rename-desc-handler-to-desc-chip-ia64-fix-2.patch
genirq-sem2mutex-probe_sem-probing_active.patch
genirq-cleanup-merge-irq_affinity-into-irq_desc.patch
genirq-cleanup-remove-irq_descp.patch
genirq-cleanup-remove-fastcall.patch
genirq-cleanup-misc-code-cleanups.patch
genirq-cleanup-reduce-irq_desc_t-use-mark-it-obsolete.patch
genirq-cleanup-include-linux-irqh.patch
genirq-cleanup-merge-irq_dir-smp_affinity_entry-into-irq_desc.patch
genirq-cleanup-merge-pending_irq_cpumask-into-irq_desc.patch
genirq-cleanup-turn-arch_has_irq_per_cpu-into-config_irq_per_cpu.patch
genirq-debug-better-debug-printout-in-enable_irq.patch
genirq-add-retrigger-irq-op-to-consolidate-hw_irq_resend.patch
genirq-doc-comment-include-linux-irqh-structures.patch
genirq-doc-handle_irq_event-and-__do_irq-comments.patch
genirq-cleanup-no_irq_type-cleanups.patch
genirq-doc-add-design-documentation.patch
genirq-add-genirq-sw-irq-retrigger.patch
genirq-add-irq_noprobe-support.patch
genirq-add-irq_norequest-support.patch
genirq-add-irq_noautoen-support.patch
genirq-update-copyrights.patch
genirq-core.patch
genirq-add-irq-chip-support.patch
genirq-add-handle_bad_irq.patch
genirq-add-irq-wake-power-management-support.patch
genirq-add-sa_trigger-support.patch
genirq-cleanup-no_irq_type-no_irq_chip-rename.patch
genirq-convert-the-x86_64-architecture-to-irq-chips.patch
genirq-convert-the-i386-architecture-to-irq-chips.patch
genirq-convert-the-i386-architecture-to-irq-chips-fix-2.patch
genirq-more-verbose-debugging-on-unexpected-irq-vectors.patch
detect-atomic-counter-underflows.patch
debug-shared-irqs.patch
make-frame_pointer-default=y.patch
mutex-subsystem-synchro-test-module.patch
vdso-print-fatal-signals.patch
vdso-improve-print_fatal_signals-support-by-adding-memory-maps.patch
