From: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx> Detect PVM hypervisor support through the use of the PVM synthetic instruction 'PVM_SYNTHETIC_CPUID'. This is a necessary step in preparing to initialize the PVM guest during booting. Signed-off-by: Lai Jiangshan <jiangshan.ljs@xxxxxxxxxxxx> Signed-off-by: Hou Wenlong <houwenlong.hwl@xxxxxxxxxxxx> --- arch/x86/include/asm/pvm_para.h | 69 +++++++++++++++++++++++++++++++++ arch/x86/kernel/Makefile | 1 + arch/x86/kernel/pvm.c | 22 +++++++++++ 3 files changed, 92 insertions(+) create mode 100644 arch/x86/include/asm/pvm_para.h create mode 100644 arch/x86/kernel/pvm.c diff --git a/arch/x86/include/asm/pvm_para.h b/arch/x86/include/asm/pvm_para.h new file mode 100644 index 000000000000..efd7afdf9be9 --- /dev/null +++ b/arch/x86/include/asm/pvm_para.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_PVM_PARA_H +#define _ASM_X86_PVM_PARA_H + +#include <linux/init.h> +#include <uapi/asm/pvm_para.h> + +#ifdef CONFIG_PVM_GUEST +#include <asm/irqflags.h> +#include <uapi/asm/kvm_para.h> + +void __init pvm_early_setup(void); + +static inline void pvm_cpuid(unsigned int *eax, unsigned int *ebx, + unsigned int *ecx, unsigned int *edx) +{ + asm(__ASM_FORM(.byte PVM_SYNTHETIC_CPUID ;) + : "=a" (*eax), + "=b" (*ebx), + "=c" (*ecx), + "=d" (*edx) + : "0" (*eax), "2" (*ecx)); +} + +/* + * pvm_detect() is called before event handling is set up and it might + * possibly be called under any hypervisor other than PVM, so it must not + * trigger any trap in any possible scenario. PVM_SYNTHETIC_CPUID is supposed + * not to trigger any trap in the real or virtual x86 kernel mode and is also + * guaranteed to trigger a trap in the underlying hardware user mode for the + * hypervisor emulating it. 
+ */ +static inline bool pvm_detect(void) +{ + unsigned long cs; + uint32_t eax, signature[3]; + + /* check underlying interrupt flags */ + if (arch_irqs_disabled_flags(native_save_fl())) + return false; + + /* check underlying CS */ + asm volatile("mov %%cs,%0\n\t" : "=r" (cs) : ); + if ((cs & 3) != 3) + return false; + + /* check KVM_SIGNATURE and KVM_CPUID_VENDOR_FEATURES */ + eax = KVM_CPUID_SIGNATURE; + pvm_cpuid(&eax, &signature[0], &signature[1], &signature[2]); + if (memcmp(KVM_SIGNATURE, signature, 12)) + return false; + if (eax < KVM_CPUID_VENDOR_FEATURES) + return false; + + /* check PVM_CPUID_SIGNATURE */ + eax = KVM_CPUID_VENDOR_FEATURES; + pvm_cpuid(&eax, &signature[0], &signature[1], &signature[2]); + if (signature[0] != PVM_CPUID_SIGNATURE) + return false; + + return true; +} +#else +static inline void pvm_early_setup(void) +{ +} +#endif /* CONFIG_PVM_GUEST */ + +#endif /* _ASM_X86_PVM_PARA_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index dc1f5a303e9b..67f11f7d5c88 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -129,6 +129,7 @@ obj-$(CONFIG_AMD_NB) += amd_nb.o obj-$(CONFIG_DEBUG_NMI_SELFTEST) += nmi_selftest.o obj-$(CONFIG_KVM_GUEST) += kvm.o kvmclock.o +obj-$(CONFIG_PVM_GUEST) += pvm.o obj-$(CONFIG_PARAVIRT) += paravirt.o obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o diff --git a/arch/x86/kernel/pvm.c b/arch/x86/kernel/pvm.c new file mode 100644 index 000000000000..2d27044eaf25 --- /dev/null +++ b/arch/x86/kernel/pvm.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * KVM PVM paravirt_ops implementation + * + * Copyright (C) 2020 Ant Group + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ +#define pr_fmt(fmt) "pvm-guest: " fmt + +#include <asm/cpufeature.h> +#include <asm/pvm_para.h> + +void __init pvm_early_setup(void) +{ + if (!pvm_detect()) + return; + + setup_force_cpu_cap(X86_FEATURE_KVM_PVM_GUEST); +} -- 2.19.1.6.gb485710b