From: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>

Use static_call to switch between:

  native_queued_spin_lock_slowpath()
  __pv_queued_spin_lock_slowpath()
  native_queued_spin_unlock()
  __pv_queued_spin_unlock()

Finish the pv_wait implementation; pv_kick still needs the SBI
definition added by the next patches.

Reviewed-by: Leonardo Bras <leobras@xxxxxxxxxx>
Signed-off-by: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
Signed-off-by: Guo Ren <guoren@xxxxxxxxxx>
---
 arch/riscv/include/asm/Kbuild               |  1 -
 arch/riscv/include/asm/qspinlock.h          | 35 +++++++++++++
 arch/riscv/include/asm/qspinlock_paravirt.h | 29 +++++++++++
 arch/riscv/kernel/qspinlock_paravirt.c      | 57 +++++++++++++++++++++
 arch/riscv/kernel/setup.c                   |  4 ++
 5 files changed, 125 insertions(+), 1 deletion(-)
 create mode 100644 arch/riscv/include/asm/qspinlock.h
 create mode 100644 arch/riscv/include/asm/qspinlock_paravirt.h
 create mode 100644 arch/riscv/kernel/qspinlock_paravirt.c

diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index ad72f2bd4cc9..85a428ad116d 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -8,6 +8,5 @@ generic-y += spinlock_types.h
 generic-y += ticket_spinlock.h
 generic-y += qrwlock.h
 generic-y += qrwlock_types.h
-generic-y += qspinlock.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/qspinlock.h b/arch/riscv/include/asm/qspinlock.h
new file mode 100644
index 000000000000..02ce973b5b6e
--- /dev/null
+++ b/arch/riscv/include/asm/qspinlock.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c), 2023 Alibaba
+ * Authors:
+ *	Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
+ */
+
+#ifndef _ASM_RISCV_QSPINLOCK_H
+#define _ASM_RISCV_QSPINLOCK_H
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#include <asm/qspinlock_paravirt.h>
+
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD		(1 << 15)
+
+void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void __pv_init_lock_hash(void);
+void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+	static_call(pv_queued_spin_lock_slowpath)(lock, val);
+}
+
+#define queued_spin_unlock	queued_spin_unlock
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+	static_call(pv_queued_spin_unlock)(lock);
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_RISCV_QSPINLOCK_H */
diff --git a/arch/riscv/include/asm/qspinlock_paravirt.h b/arch/riscv/include/asm/qspinlock_paravirt.h
new file mode 100644
index 000000000000..9681e851f69d
--- /dev/null
+++ b/arch/riscv/include/asm/qspinlock_paravirt.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c), 2023 Alibaba Cloud
+ * Authors:
+ *	Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
+ */
+
+#ifndef _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+#define _ASM_RISCV_QSPINLOCK_PARAVIRT_H
+
+void pv_wait(u8 *ptr, u8 val);
+void pv_kick(int cpu);
+
+void dummy_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+void dummy_queued_spin_unlock(struct qspinlock *lock);
+
+DECLARE_STATIC_CALL(pv_queued_spin_lock_slowpath, dummy_queued_spin_lock_slowpath);
+DECLARE_STATIC_CALL(pv_queued_spin_unlock, dummy_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void);
+
+static inline bool pv_is_native_spin_unlock(void)
+{
+	return false;
+}
+
+void __pv_queued_spin_unlock(struct qspinlock *lock);
+
+#endif /* _ASM_RISCV_QSPINLOCK_PARAVIRT_H */
diff --git a/arch/riscv/kernel/qspinlock_paravirt.c b/arch/riscv/kernel/qspinlock_paravirt.c
new file mode 100644
index 000000000000..85ff5a3ec234
--- /dev/null
+++ b/arch/riscv/kernel/qspinlock_paravirt.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c), 2023 Alibaba Cloud
+ * Authors:
+ *	Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
+ */
+
+#include <linux/static_call.h>
+#include <asm/qspinlock_paravirt.h>
+#include <asm/sbi.h>
+
+void pv_kick(int cpu)
+{
+	return;
+}
+
+void pv_wait(u8 *ptr, u8 val)
+{
+	unsigned long flags;
+
+	if (in_nmi())
+		return;
+
+	local_irq_save(flags);
+	if (READ_ONCE(*ptr) != val)
+		goto out;
+
+	/* wait_for_interrupt(); */
+out:
+	local_irq_restore(flags);
+}
+
+static void native_queued_spin_unlock(struct qspinlock *lock)
+{
+	smp_store_release(&lock->locked, 0);
+}
+
+DEFINE_STATIC_CALL(pv_queued_spin_lock_slowpath, native_queued_spin_lock_slowpath);
+EXPORT_STATIC_CALL(pv_queued_spin_lock_slowpath);
+
+DEFINE_STATIC_CALL(pv_queued_spin_unlock, native_queued_spin_unlock);
+EXPORT_STATIC_CALL(pv_queued_spin_unlock);
+
+void __init pv_qspinlock_init(void)
+{
+	if (num_possible_cpus() == 1)
+		return;
+
+	if (sbi_get_firmware_id() != SBI_EXT_BASE_IMPL_ID_KVM)
+		return;
+
+	pr_info("PV qspinlocks enabled\n");
+	__pv_init_lock_hash();
+
+	static_call_update(pv_queued_spin_lock_slowpath, __pv_queued_spin_lock_slowpath);
+	static_call_update(pv_queued_spin_unlock, __pv_queued_spin_unlock);
+}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index e33430e9d97e..052bbfbb7f32 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -304,6 +304,10 @@ static void __init riscv_spinlock_init(void)
 
 #ifdef CONFIG_QUEUED_SPINLOCKS
 	virt_spin_lock_init();
 #endif
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+	pv_qspinlock_init();
+#endif
 }
 #endif
-- 
2.40.1
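
For readers unfamiliar with static_call: the patch keeps the native slowpath
and unlock as the default call targets and retargets them to the __pv_
variants in pv_qspinlock_init() once the SBI firmware is known to be KVM.
Below is a minimal, self-contained sketch of that pattern; the demo_* names
are made up for illustration and are not part of this patch or the tree.

	#include <linux/init.h>
	#include <linux/static_call.h>

	static void demo_native_op(void)
	{
		/* native (bare-metal) path */
	}

	static void demo_pv_op(void)
	{
		/* paravirtualized path */
	}

	/* Call sites are compiled against the native default. */
	DEFINE_STATIC_CALL(demo_op, demo_native_op);

	static void demo_caller(void)
	{
		/* Emitted as a direct call, no function-pointer indirection. */
		static_call(demo_op)();
	}

	static int __init demo_init(void)
	{
		/*
		 * At boot, e.g. only when a hypervisor is detected, retarget
		 * every demo_op call site to the paravirt implementation.
		 */
		static_call_update(demo_op, demo_pv_op);
		demo_caller();
		return 0;
	}
	early_initcall(demo_init);

Compared with a plain function pointer, the native fast path pays no
indirect-branch cost; the pv variants are only patched in when
sbi_get_firmware_id() reports SBI_EXT_BASE_IMPL_ID_KVM.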