On Mon, 2012-07-16 at 12:19 +0200, Thomas Gleixner wrote:
> > @@ -647,8 +648,11 @@ static inline void rt_spin_lock_fastlock
> > 
> >  	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
> >  		rt_mutex_deadlock_account_lock(lock, current);
> > -	else
> > +	else {
> > +		if (blk_needs_flush_plug(current))
> > +			blk_schedule_flush_plug(current);
> >  		slowfn(lock);
> > +	}
> 
> That should do the trick.

Box has been grinding away long enough now to agree that it did.

rt: pull your plug before blocking

Queued IO can lead to IO deadlock should a task require wakeup from a
task which is blocked on that queued IO.

ext3: dbench1 queues a buffer, blocks on journal mutex, its plug is not
pulled.  dbench2, the mutex owner, is waiting for kjournald, who is
waiting for the buffer queued by dbench1.  Game over.

Signed-off-by: Mike Galbraith <efault@xxxxxx>

diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index d58db99..39140a5 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
+#include <linux/blkdev.h>
 
 #include "rtmutex_common.h"
 
@@ -647,8 +648,11 @@ static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
 
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
 		rt_mutex_deadlock_account_lock(lock, current);
-	else
+	else {
+		if (blk_needs_flush_plug(current))
+			blk_schedule_flush_plug(current);
 		slowfn(lock);
+	}
 }
 
 static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
@@ -1104,8 +1108,11 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
-	} else
+	} else {
+		if (blk_needs_flush_plug(current))
+			blk_schedule_flush_plug(current);
 		return slowfn(lock, state, NULL, detect_deadlock);
+	}
 }
 
 static inline int
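
FWIW, for anyone less familiar with block plugging, here is a minimal
userspace sketch of the pattern the patch implements.  All of the names
below (plug_queue, flush_plug, lock_maybe_blocking) are made up for
illustration only; the real mechanism is the block layer plug flushed
via blk_schedule_flush_plug() in the diff above.

/*
 * Toy model: each thread batches deferred "IO" in a per-thread plug.
 * Before blocking on a contended lock, it flushes that plug, so that a
 * task waiting on the queued IO can't turn out to be the very task we
 * need to wake us up.  Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define PLUG_MAX 8

struct plug {
	int pending[PLUG_MAX];		/* queued, not yet submitted "IO" */
	int count;
};

static __thread struct plug current_plug;

/* Batch an "IO" in the per-thread plug instead of submitting it now. */
static void plug_queue(int io)
{
	if (current_plug.count < PLUG_MAX)
		current_plug.pending[current_plug.count++] = io;
}

/* Submit everything batched so far; waiters on this IO can now run. */
static void flush_plug(void)
{
	for (int i = 0; i < current_plug.count; i++)
		printf("submitting io %d\n", current_plug.pending[i]);
	current_plug.count = 0;
}

/*
 * Analogue of the patched fastlock paths: try the fast path first, and
 * if we are actually going to block, pull the plug before sleeping.
 */
static void lock_maybe_blocking(pthread_mutex_t *lock)
{
	if (pthread_mutex_trylock(lock) == 0)
		return;				/* fast path: no flush needed */
	if (current_plug.count)
		flush_plug();			/* don't sleep on our own IO */
	pthread_mutex_lock(lock);		/* slow path: may block */
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	plug_queue(1);
	lock_maybe_blocking(&lock);	/* uncontended here, so no flush */
	pthread_mutex_unlock(&lock);
	flush_plug();			/* submit whatever is still plugged */
	return 0;
}

As in the patch, the fast (uncontended) path is left untouched; the
plug is only pulled when the task is actually about to schedule out.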