The RT patches disable the existing bit_spinlock.h definitions with an
"#if 0" in favour of restating their own within spinlock.h itself, but
the latter are incomplete when PREEMPT_RT is not set.  The smallest-
footprint workaround is to use CONFIG_PREEMPT_RT as the trigger for
choosing between the new spinlock.h definitions and the older, existing
ones.  This change should be considered for folding into the
rt-mutex-core.patch.

Signed-off-by: Paul Gortmaker <paul.gortmaker@xxxxxxxxxxxxx>
---
 include/linux/bit_spinlock.h |    2 +-
 include/linux/spinlock.h     |    2 ++
 2 files changed, 3 insertions(+), 1 deletions(-)

diff --git a/include/linux/bit_spinlock.h b/include/linux/bit_spinlock.h
index 864ca40..b809011 100644
--- a/include/linux/bit_spinlock.h
+++ b/include/linux/bit_spinlock.h
@@ -1,7 +1,7 @@
 #ifndef __LINUX_BIT_SPINLOCK_H
 #define __LINUX_BIT_SPINLOCK_H
 
-#if 0
+#ifndef CONFIG_PREEMPT_RT
 
 /*
  * bit-based spin_lock()
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 8abd577..ba0a5e6 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -509,6 +509,7 @@ do {						\
 	__cond_lock(lock, PICK_SPIN_OP_RET(__spin_trylock_irqsave,	\
 		_spin_trylock_irqsave, lock, &flags))
 
+#ifdef CONFIG_PREEMPT_RT
 /*
  * bit-based spin_lock()
  *
@@ -569,6 +570,7 @@ static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
 	return 1;
 #endif
 }
+#endif /* CONFIG_PREEMPT_RT */
 
 /**
  * __raw_spin_can_lock - would __raw_spin_trylock() succeed?
-- 
1.6.0
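
As a quick illustration (not part of the patch, with function bodies
elided), the effective layout after applying the change is roughly:

/* include/linux/bit_spinlock.h -- classic helpers, compiled only when PREEMPT_RT is off */
#ifndef CONFIG_PREEMPT_RT
static inline void bit_spin_lock(int bitnum, unsigned long *addr)      { /* ... */ }
static inline int  bit_spin_trylock(int bitnum, unsigned long *addr)   { /* ... */ }
static inline void bit_spin_unlock(int bitnum, unsigned long *addr)    { /* ... */ }
static inline int  bit_spin_is_locked(int bitnum, unsigned long *addr) { /* ... */ }
#endif

/* include/linux/spinlock.h -- RT replacements, compiled only when PREEMPT_RT is on */
#ifdef CONFIG_PREEMPT_RT
static inline void bit_spin_lock(int bitnum, unsigned long *addr)      { /* ... */ }
/* ... remaining bit_spin_* helpers ... */
static inline int  bit_spin_is_locked(int bitnum, unsigned long *addr) { /* ... */ }
#endif /* CONFIG_PREEMPT_RT */

so exactly one set of bit_spin_* definitions is visible for any given
.config, which avoids the incomplete-definition problem described above.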