list_bl would crash with BUG() if we used it without locking. dm-snapshot
uses its own locking on realtime kernels (it can't use list_bl because
list_bl uses a raw spinlock and dm-snapshot takes other non-raw spinlocks
while holding bl_lock).

To avoid this BUG, we must set LIST_BL_LOCKMASK = 0.

This patch is intended only for the realtime kernel patchset, not for the
upstream kernel.

Signed-off-by: Mikulas Patocka <mpatocka@xxxxxxxxxx>

Index: linux-rt-devel/include/linux/list_bl.h
===================================================================
--- linux-rt-devel.orig/include/linux/list_bl.h	2019-11-07 14:01:51.000000000 +0100
+++ linux-rt-devel/include/linux/list_bl.h	2019-11-08 10:12:49.000000000 +0100
@@ -19,7 +19,7 @@
  * some fast and compact auxiliary data.
  */
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)) && !defined(CONFIG_PREEMPT_RT_BASE)
 #define LIST_BL_LOCKMASK	1UL
 #else
 #define LIST_BL_LOCKMASK	0UL
@@ -161,9 +161,6 @@ static inline void hlist_bl_lock(struct
 	bit_spin_lock(0, (unsigned long *)b);
 #else
 	raw_spin_lock(&b->lock);
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	__set_bit(0, (unsigned long *)b);
-#endif
 #endif
 }
 
@@ -172,9 +169,6 @@ static inline void hlist_bl_unlock(struc
 #ifndef CONFIG_PREEMPT_RT_BASE
 	__bit_spin_unlock(0, (unsigned long *)b);
 #else
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-	__clear_bit(0, (unsigned long *)b);
-#endif
 	raw_spin_unlock(&b->lock);
 #endif
 }
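
For reference, here is a minimal userspace sketch (not kernel code, and not
part of the patch) of the lock-bit encoding that list_bl relies on. The
names set_first, head and node are illustrative stand-ins for the kernel's
hlist_bl helpers, and assert() plays the role of LIST_BL_BUG_ON(). It only
illustrates why forcing LIST_BL_LOCKMASK to 0 makes the check vacuous when
the caller, like dm-snapshot on RT, holds its own lock instead of the bit
spinlock in bit 0 of the head pointer.

	/* Userspace illustration only; simplified from include/linux/list_bl.h. */
	#include <assert.h>
	#include <stdio.h>

	/* With CONFIG_PREEMPT_RT_BASE the patch forces this to 0UL. */
	#define LIST_BL_LOCKMASK 0UL

	struct hlist_bl_node { struct hlist_bl_node *next; };
	struct hlist_bl_head { struct hlist_bl_node *first; };

	/* Stand-in for hlist_bl_set_first(): the kernel version BUGs if the
	 * low bit of the head pointer does not match LIST_BL_LOCKMASK, i.e.
	 * if the bit spinlock is not held.  With LOCKMASK == 0 both sides of
	 * the comparison are 0 and the check can never fire. */
	static void set_first(struct hlist_bl_head *h, struct hlist_bl_node *n)
	{
		assert(((unsigned long)h->first & LIST_BL_LOCKMASK) == LIST_BL_LOCKMASK);
		h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK);
	}

	int main(void)
	{
		struct hlist_bl_head head = { .first = NULL };
		struct hlist_bl_node node = { .next = NULL };

		/* No bit spinlock is taken here; the caller is assumed to
		 * provide its own (possibly non-raw) locking instead. */
		set_first(&head, &node);
		printf("first = %p (no lock bit encoded)\n", (void *)head.first);
		return 0;
	}

With LIST_BL_LOCKMASK set to 1UL the same call would trip the assertion,
which is the BUG() the patch is avoiding for callers that never take the
bit spinlock.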