Commit-ID:  446f3dc8cc0af59259c6c8b898726fae7ed2c055
Gitweb:     http://git.kernel.org/tip/446f3dc8cc0af59259c6c8b898726fae7ed2c055
Author:     Pan Xinhui <xinhui.pan@xxxxxxxxxxxxxxxxxx>
AuthorDate: Wed, 2 Nov 2016 05:08:33 -0400
Committer:  Ingo Molnar <mingo@xxxxxxxxxx>
CommitDate: Tue, 22 Nov 2016 12:48:07 +0100

locking/core, x86/paravirt: Implement vcpu_is_preempted(cpu) for KVM and Xen guests

Optimize spinlock and mutex busy-loops by providing a vcpu_is_preempted(cpu)
function on KVM and Xen platforms.

Extend the pv_lock_ops interface accordingly and implement the callbacks
on KVM and Xen.

Signed-off-by: Pan Xinhui <xinhui.pan@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
[ Translated to English. ]
Acked-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: David.Laight@xxxxxxxxxx
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: benh@xxxxxxxxxxxxxxxxxxx
Cc: boqun.feng@xxxxxxxxx
Cc: borntraeger@xxxxxxxxxx
Cc: bsingharora@xxxxxxxxx
Cc: dave@xxxxxxxxxxxx
Cc: jgross@xxxxxxxx
Cc: kernellwp@xxxxxxxxx
Cc: konrad.wilk@xxxxxxxxxx
Cc: linuxppc-dev@xxxxxxxxxxxxxxxx
Cc: mpe@xxxxxxxxxxxxxx
Cc: paulmck@xxxxxxxxxxxxxxxxxx
Cc: paulus@xxxxxxxxx
Cc: rkrcmar@xxxxxxxxxx
Cc: virtualization@xxxxxxxxxxxxxxxxxxxxxxxxxx
Cc: will.deacon@xxxxxxx
Cc: xen-devel-request@xxxxxxxxxxxxxxxxxxxx
Cc: xen-devel@xxxxxxxxxxxxxxxxxxxx
Link: http://lkml.kernel.org/r/1478077718-37424-7-git-send-email-xinhui.pan@xxxxxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
---
 arch/x86/include/asm/paravirt_types.h | 2 ++
 arch/x86/include/asm/spinlock.h       | 8 ++++++++
 arch/x86/kernel/paravirt-spinlocks.c  | 6 ++++++
 3 files changed, 16 insertions(+)

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0f400c0..38c3bb7 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -310,6 +310,8 @@ struct pv_lock_ops {
 
 	void (*wait)(u8 *ptr, u8 val);
 	void (*kick)(int cpu);
+
+	bool (*vcpu_is_preempted)(int cpu);
 };
 
 /* This contains all the paravirt structures: we get a convenient
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 921bea7..0526f59 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,6 +26,14 @@
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+	return pv_lock_ops.vcpu_is_preempted(cpu);
+}
+#endif
+
 #include <asm/qspinlock.h>
 
 /*
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 2c55a00..2f204dd 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -21,12 +21,18 @@ bool pv_is_native_spin_unlock(void)
 		__raw_callee_save___native_queued_spin_unlock;
 }
 
+static bool native_vcpu_is_preempted(int cpu)
+{
+	return 0;
+}
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
 	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
 	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
 	.wait = paravirt_nop,
 	.kick = paravirt_nop,
+	.vcpu_is_preempted = native_vcpu_is_preempted,
 #endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
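For context, here is a minimal sketch of the kind of busy-wait loop this hook
is meant to optimize. It is illustrative only and not part of this patch: the
spin_on_owner() helper below is hypothetical (modeled on the kernel's mutex
owner-spinning pattern), while task_cpu(), READ_ONCE() and cpu_relax() are
existing kernel primitives:

	/* Illustrative sketch -- not part of this patch. */
	static bool spin_on_owner(struct task_struct *owner)
	{
		int cpu = task_cpu(owner);	/* CPU the owner last ran on */

		while (READ_ONCE(owner->on_cpu)) {
			/*
			 * A preempted vCPU makes no forward progress, so
			 * burning cycles waiting for it is pointless: stop
			 * spinning and go to sleep instead.
			 */
			if (vcpu_is_preempted(cpu))
				return false;

			cpu_relax();
		}

		return true;
	}

On bare metal the native_vcpu_is_preempted() callback always reports false,
so the loop behaves exactly as before; only KVM and Xen guests, which install
their own callbacks later in this series, take the early-exit path.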