When we detect a hypervisor (!paravirt, see later patches), revert to
a simple test-and-set lock to avoid the horrors of queue preemption.

Signed-off-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
---
 arch/x86/include/asm/qspinlock.h |   14 ++++++++++++++
 include/asm-generic/qspinlock.h  |    7 +++++++
 kernel/locking/qspinlock.c       |    3 +++
 3 files changed, 24 insertions(+)

--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_QSPINLOCK_H
 #define _ASM_X86_QSPINLOCK_H
 
+#include <asm/cpufeature.h>
 #include <asm-generic/qspinlock_types.h>
 
 #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
@@ -20,6 +21,19 @@ static inline void queue_spin_unlock(str
 
 #endif /* !CONFIG_X86_OOSTORE && !CONFIG_X86_PPRO_FENCE */
 
+#define virt_queue_spin_lock virt_queue_spin_lock
+
+static inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+		return false;
+
+	while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+		cpu_relax();
+
+	return true;
+}
+
 #include <asm-generic/qspinlock.h>
 
 #endif /* _ASM_X86_QSPINLOCK_H */
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -98,6 +98,13 @@ static __always_inline void queue_spin_u
 }
 #endif
 
+#ifndef virt_queue_spin_lock
+static __always_inline bool virt_queue_spin_lock(struct qspinlock *lock)
+{
+	return false;
+}
+#endif
+
 /*
  * Initializier
  */
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -247,6 +247,9 @@ void queue_spin_lock_slowpath(struct qsp
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
 
+	if (virt_queue_spin_lock(lock))
+		return;
+
 	/*
 	 * wait for in-progress pending->locked hand-overs
 	 *
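
Note (illustration, not part of the patch): the x86 virt_queue_spin_lock()
above simply spins on a full-word compare-and-swap instead of entering the
MCS queue, since a queued waiter whose vCPU gets preempted stalls everyone
behind it. Below is a minimal standalone C11 sketch of that test-and-set
behaviour; the tas_lock names are made up for illustration, and the kernel
version instead gates on X86_FEATURE_HYPERVISOR and uses atomic_cmpxchg()
with cpu_relax() on the qspinlock word, as the diff shows.

	#include <stdatomic.h>

	struct tas_lock {
		atomic_int val;		/* 0 == unlocked, 1 == locked */
	};

	static void tas_lock_acquire(struct tas_lock *lock)
	{
		int expected = 0;

		/* Spin until we move the lock word from 0 to 1. */
		while (!atomic_compare_exchange_weak(&lock->val, &expected, 1))
			expected = 0;	/* a failed CAS overwrites 'expected' */
	}

	static void tas_lock_release(struct tas_lock *lock)
	{
		atomic_store(&lock->val, 0);
	}

The asm-generic stub returning false keeps the queued slowpath unchanged on
architectures that do not provide their own virt_queue_spin_lock().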