vcpu_is_preempted() can now report the actual state of the vCPU, so the
scheduler can make better decisions when it picks an idle CPU on which
to enqueue a task.

Signed-off-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
---
 arch/arm64/include/asm/spinlock.h | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 0525c0b089ed..1d579497e1b8 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -7,21 +7,23 @@
 
 #include <asm/qspinlock.h>
 #include <asm/qrwlock.h>
+#include <asm/paravirt.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
 
-/*
- * Changing this will break osq_lock() thanks to the call inside
- * smp_cond_load_relaxed().
- *
- * See:
- * https://lore.kernel.org/lkml/20200110100612.GC2827@xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- */
 #define vcpu_is_preempted vcpu_is_preempted
-static inline bool vcpu_is_preempted(int cpu)
+
+#ifdef CONFIG_PARAVIRT
+static inline bool vcpu_is_preempted(unsigned int cpu)
+{
+	return paravirt_vcpu_is_preempted(cpu);
+}
+#else
+static inline bool vcpu_is_preempted(unsigned int cpu)
 {
 	return false;
 }
+#endif /* CONFIG_PARAVIRT */
 
 #endif /* __ASM_SPINLOCK_H */
--
2.32.0.93.g670b81a890-goog
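
For context, the scheduler consumes this hint when it looks for an idle CPU
on the wakeup path; roughly (paraphrased from kernel/sched/core.c), a CPU
whose vCPU is currently preempted by the host is not treated as available:

int available_idle_cpu(int cpu)
{
	/* The CPU must be idle from the guest's point of view... */
	if (!idle_cpu(cpu))
		return 0;

	/* ...and its vCPU must actually be running on a host CPU. */
	if (vcpu_is_preempted(cpu))
		return 0;

	return 1;
}

With the old stub always returning false, every idle vCPU looked available;
with the paravirt-backed version, vCPUs that are scheduled out on the host
get skipped.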
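
Note the hunk only adds the wrapper; paravirt_vcpu_is_preempted() itself is
provided by <asm/paravirt.h> and is not part of this diff. A minimal sketch
of one possible shape, assuming a hypervisor-installed callback (the
pv_vcpu_is_preempted hook below is a hypothetical name for illustration,
not necessarily what the series uses):

/* Hypothetical sketch, not taken from this series. */
typedef bool (*pv_vcpu_is_preempted_fn)(unsigned int cpu);

/* Set at boot by hypervisor-specific PV init code, NULL otherwise. */
extern pv_vcpu_is_preempted_fn pv_vcpu_is_preempted;

static inline bool paravirt_vcpu_is_preempted(unsigned int cpu)
{
	if (pv_vcpu_is_preempted)
		return pv_vcpu_is_preempted(cpu);

	/* No hypervisor support detected: behave like bare metal. */
	return false;
}

The real backend would typically read per-vCPU preempted state shared by the
hypervisor (similar in spirit to the stolen-time record), but that lives in
other patches of the series.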