[patch 4/4] fs: jbd/jbd2: Substitute BH locks for RT and lock debugging

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Bit spinlocks are problematic if PREEMPT_RT is enabled. They disable
preemption, which is undesired for latency reasons and breaks when regular
spinlocks are taken within the bit_spinlock locked region because regular
spinlocks are converted to 'sleeping spinlocks' on RT.

Substitute the BH_State and BH_JournalHead bit spinlocks with regular
spinlocks for PREEMPT_RT enabled kernels.

Bit spinlocks are also not covered by lock debugging, e.g. lockdep. With
the spinlock substitution in place, they can be exposed via
CONFIG_DEBUG_BIT_SPINLOCKS.

Originally-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: linux-ext4@xxxxxxxxxxxxxxx
Cc: "Theodore Ts'o" <tytso@xxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Jan Kara <jack@xxxxxxxx>
---
 include/linux/buffer_head.h |    8 ++++++++
 include/linux/jbd2.h        |   36 ++++++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+)

--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -79,6 +79,10 @@ struct buffer_head {
 
 #if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_DEBUG_BIT_SPINLOCKS)
 	spinlock_t b_uptodate_lock;
+# if IS_ENABLED(CONFIG_JBD2)
+	spinlock_t b_state_lock;
+	spinlock_t b_journal_head_lock;
+# endif
 #endif
 };
 
@@ -101,6 +105,10 @@ bh_uptodate_unlock_irqrestore(struct buf
 static inline void buffer_head_init_locks(struct buffer_head *bh)
 {
 	spin_lock_init(&bh->b_uptodate_lock);
+#if IS_ENABLED(CONFIG_JBD2)
+	spin_lock_init(&bh->b_state_lock);
+	spin_lock_init(&bh->b_journal_head_lock);
+#endif
 }
 
 #else /* PREEMPT_RT || DEBUG_BIT_SPINLOCKS */
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -342,6 +342,40 @@ static inline struct journal_head *bh2jh
 	return bh->b_private;
 }
 
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_DEBUG_BIT_SPINLOCKS)
+
+static inline void jbd_lock_bh_state(struct buffer_head *bh)
+{
+	spin_lock(&bh->b_state_lock);
+}
+
+static inline int jbd_trylock_bh_state(struct buffer_head *bh)
+{
+	return spin_trylock(&bh->b_state_lock);
+}
+
+static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
+{
+	return spin_is_locked(&bh->b_state_lock);
+}
+
+static inline void jbd_unlock_bh_state(struct buffer_head *bh)
+{
+	spin_unlock(&bh->b_state_lock);
+}
+
+static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
+{
+	spin_lock(&bh->b_journal_head_lock);
+}
+
+static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
+{
+	spin_unlock(&bh->b_journal_head_lock);
+}
+
+#else /* PREEMPT_RT || DEBUG_BIT_SPINLOCKS */
+
 static inline void jbd_lock_bh_state(struct buffer_head *bh)
 {
 	bit_spin_lock(BH_State, &bh->b_state);
@@ -372,6 +406,8 @@ static inline void jbd_unlock_bh_journal
 	bit_spin_unlock(BH_JournalHead, &bh->b_state);
 }
 
+#endif /* !PREEMPT_RT && !DEBUG_BIT_SPINLOCKS */
+
 #define J_ASSERT(assert)	BUG_ON(!(assert))
 
 #define J_ASSERT_BH(bh, expr)	J_ASSERT(expr)





[Index of Archives]     [Reiser Filesystem Development]     [Ceph FS]     [Kernel Newbies]     [Security]     [Netfilter]     [Bugtraq]     [Linux FS]     [Yosemite National Park]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Samba]     [Device Mapper]     [Linux Media]

  Powered by Linux