Commit-ID:  24a376d65177009a4dd8d846543c5dc69f5c4ced
Gitweb:     https://git.kernel.org/tip/24a376d65177009a4dd8d846543c5dc69f5c4ced
Author:     Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Thu, 1 Aug 2019 15:30:28 +0200
Committer:  Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitDate: Tue, 6 Aug 2019 12:49:16 +0200

locking/qspinlock,x86: Clarify virt_spin_lock_key

Add a few comments to clarify how this is supposed to work.

Reported-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
---
 arch/x86/include/asm/qspinlock.h | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index bd5ac6cc37db..444d6fd9a6d8 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -63,10 +63,25 @@ static inline bool vcpu_is_preempted(long cpu)
 #endif
 
 #ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_lock_init() and HV setup.
+ */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void native_pv_lock_init(void) __init;
 
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ *   true - lock has been negotiated, all done;
+ *   false - queued_spin_lock_slowpath() will do its thing.
+ */
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
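
The hunk stops at the opening brace of virt_spin_lock(); the patch only adds
comments above it. For readers without the header at hand, below is a
self-contained userspace C sketch of the contract the new comment documents:
with the key disabled (native), return false so the caller falls through to
queued_spin_lock_slowpath(); with the key enabled (virt), take the lock with
an unfair test-and-set loop and return true. The static key is emulated with
a plain bool and the kernel's atomic_t with C11 stdatomic; this illustrates
the pattern and is not the kernel code verbatim.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the kernel types: the lock word as a C11 atomic, and
     * a plain bool emulating static_branch_likely(&virt_spin_lock_key)
     * (true by default, cleared on native by native_pv_lock_init()). */
    struct qspinlock { atomic_int val; };
    static bool virt_spin_lock_enabled = true;

    #define _Q_LOCKED_VAL 1

    /*
     * The hijack contract: return false to let the caller run
     * queued_spin_lock_slowpath(); return true once the lock is held
     * and the slowpath must be skipped.
     */
    static bool virt_spin_lock(struct qspinlock *lock)
    {
            if (!virt_spin_lock_enabled)
                    return false;   /* native: slowpath does its thing */

            /*
             * Unfair test-and-set: under a hypervisor a fair queued
             * lock suffers lock-holder preemption, so spinning on a
             * plain TAS lock is the lesser evil.
             */
            int expected;
            do {
                    while (atomic_load(&lock->val) != 0)
                            ;       /* cpu_relax() in the kernel */
                    expected = 0;
            } while (!atomic_compare_exchange_strong(&lock->val, &expected,
                                                     _Q_LOCKED_VAL));

            return true;            /* lock negotiated, all done */
    }

    int main(void)
    {
            struct qspinlock lock = { 0 };

            if (virt_spin_lock(&lock))
                    printf("virt path took the lock, val=%d\n",
                           atomic_load(&lock.val));
            return 0;
    }

The single-direction design the first comment describes falls out of this:
the key starts true, and the only transition ever made is true -> false (on
native), so it does not matter whether the hypervisor setup or
native_pv_lock_init() runs first.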