Assert that 64-bit only MSRs take a #GP when read or written on 32-bit
hosts, as opposed to simply skipping the MSRs on 32-bit builds.  Force
"-cpu host" so that CPUID can be used to check for 64-bit support.

Technically, the unit test could/should be even more aggressive and
require KVM to inject faults if the vCPU model doesn't support 64-bit
mode.  But there are no plans to go to that level of emulation in KVM,
and practically speaking there isn't much benefit, as allowing a 32-bit
vCPU to access the MSRs on a 64-bit host is a benign virtualization
hole.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 lib/x86/processor.h | 22 +++++++++++++++++
 x86/msr.c           | 57 ++++++++++++++++++++++++++++++++-------------
 x86/unittests.cfg   |  6 +++--
 3 files changed, 67 insertions(+), 18 deletions(-)

diff --git a/lib/x86/processor.h b/lib/x86/processor.h
index dda57a1..dfe96d0 100644
--- a/lib/x86/processor.h
+++ b/lib/x86/processor.h
@@ -2,6 +2,7 @@
 #define LIBCFLAT_PROCESSOR_H
 
 #include "libcflat.h"
+#include "desc.h"
 #include "msr.h"
 #include <stdint.h>
 
@@ -163,6 +164,7 @@ static inline u8 cpuid_maxphyaddr(void)
 #define X86_FEATURE_ARCH_CAPABILITIES	(CPUID(0x7, 0, EDX, 29))
 #define X86_FEATURE_PKS			(CPUID(0x7, 0, ECX, 31))
 #define X86_FEATURE_NX			(CPUID(0x80000001, 0, EDX, 20))
+#define X86_FEATURE_LM			(CPUID(0x80000001, 0, EDX, 29))
 #define X86_FEATURE_RDPRU		(CPUID(0x80000008, 0, EBX, 4))
 
 /*
@@ -320,6 +322,26 @@ static inline void wrmsr(u32 index, u64 val)
 	asm volatile ("wrmsr" : : "a"(a), "d"(d), "c"(index) : "memory");
 }
 
+static inline int rdmsr_checking(u32 index)
+{
+	asm volatile (ASM_TRY("1f")
+		      "rdmsr\n\t"
+		      "1:"
+		      : : "c"(index) : "memory", "eax", "edx");
+	return exception_vector();
+}
+
+static inline int wrmsr_checking(u32 index, u64 val)
+{
+	u32 a = val, d = val >> 32;
+
+	asm volatile (ASM_TRY("1f")
+		      "wrmsr\n\t"
+		      "1:"
+		      : : "a"(a), "d"(d), "c"(index) : "memory");
+	return exception_vector();
+}
+
 static inline uint64_t rdpmc(uint32_t index)
 {
 	uint32_t a, d;
diff --git a/x86/msr.c b/x86/msr.c
index 4642451..e7ebe8b 100644
--- a/x86/msr.c
+++ b/x86/msr.c
@@ -6,6 +6,7 @@
 
 struct msr_info {
 	int index;
+	bool is_64bit_only;
 	const char *name;
 	unsigned long long value;
 };
@@ -14,26 +15,26 @@ struct msr_info {
 #define addr_64 0x0000123456789abcULL
 #define addr_ul (unsigned long)addr_64
 
-#define MSR_TEST(msr, val)	\
-	{ .index = msr, .name = #msr, .value = val }
+#define MSR_TEST(msr, val, only64)	\
+	{ .index = msr, .name = #msr, .value = val, .is_64bit_only = only64 }
 
 struct msr_info msr_info[] =
 {
-	MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234),
-	MSR_TEST(MSR_IA32_SYSENTER_ESP, addr_ul),
-	MSR_TEST(MSR_IA32_SYSENTER_EIP, addr_ul),
+	MSR_TEST(MSR_IA32_SYSENTER_CS, 0x1234, false),
+	MSR_TEST(MSR_IA32_SYSENTER_ESP, addr_ul, false),
+	MSR_TEST(MSR_IA32_SYSENTER_EIP, addr_ul, false),
 	// reserved: 1:2, 4:6, 8:10, 13:15, 17, 19:21, 24:33, 35:63
-	MSR_TEST(MSR_IA32_MISC_ENABLE, 0x400c51889),
-	MSR_TEST(MSR_IA32_CR_PAT, 0x07070707),
+	MSR_TEST(MSR_IA32_MISC_ENABLE, 0x400c51889, false),
+	MSR_TEST(MSR_IA32_CR_PAT, 0x07070707, false),
+	MSR_TEST(MSR_FS_BASE, addr_64, true),
+	MSR_TEST(MSR_GS_BASE, addr_64, true),
+	MSR_TEST(MSR_KERNEL_GS_BASE, addr_64, true),
 #ifdef __x86_64__
-	MSR_TEST(MSR_FS_BASE, addr_64),
-	MSR_TEST(MSR_GS_BASE, addr_64),
-	MSR_TEST(MSR_KERNEL_GS_BASE, addr_64),
-	MSR_TEST(MSR_EFER, 0xD00),
-	MSR_TEST(MSR_LSTAR, addr_64),
-	MSR_TEST(MSR_CSTAR, addr_64),
-	MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff),
+	MSR_TEST(MSR_EFER, 0xD00, false),
 #endif
+	MSR_TEST(MSR_LSTAR, addr_64, true),
+	MSR_TEST(MSR_CSTAR, addr_64, true),
+	MSR_TEST(MSR_SYSCALL_MASK, 0xffffffff, true),
 //	MSR_IA32_DEBUGCTLMSR needs svm feature LBRV
 //	MSR_VM_HSAVE_PA only AMD host
 };
@@ -54,12 +55,36 @@ static void test_msr_rw(struct msr_info *msr, unsigned long long val)
 	report(val == r, "%s", msr->name);
 }
 
+static void test_wrmsr_fault(struct msr_info *msr, unsigned long long val)
+{
+	unsigned char vector = wrmsr_checking(msr->index, val);
+
+	report(vector == GP_VECTOR,
+	       "Expected #GP on WRMSR(%s, 0x%llx), got vector %d",
+	       msr->name, val, vector);
+}
+
+static void test_rdmsr_fault(struct msr_info *msr)
+{
+	unsigned char vector = rdmsr_checking(msr->index);
+
+	report(vector == GP_VECTOR,
+	       "Expected #GP on RDMSR(%s), got vector %d", msr->name, vector);
+}
+
 int main(int ac, char **av)
 {
+	bool is_64bit_host = this_cpu_has(X86_FEATURE_LM);
 	int i;
 
-	for (i = 0 ; i < ARRAY_SIZE(msr_info); i++)
-		test_msr_rw(&msr_info[i], msr_info[i].value);
+	for (i = 0 ; i < ARRAY_SIZE(msr_info); i++) {
+		if (is_64bit_host || !msr_info[i].is_64bit_only) {
+			test_msr_rw(&msr_info[i], msr_info[i].value);
+		} else {
+			test_wrmsr_fault(&msr_info[i], msr_info[i].value);
+			test_rdmsr_fault(&msr_info[i]);
+		}
+	}
 
 	return report_summary();
 }
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index c2608bc..29cfe51 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -168,9 +168,11 @@ arch = x86_64
 
 [msr]
 # Use GenuineIntel to ensure SYSENTER MSRs are fully preserved, and to test
-# SVM emulation of Intel CPU behavior.
+# SVM emulation of Intel CPU behavior.  Use the host CPU model so that 64-bit
+# support follows the host kernel.  Running a 32-bit guest on a 64-bit host
+# will fail due to shortcomings in KVM.
 file = msr.flat
-extra_params = -cpu qemu64,vendor=GenuineIntel
+extra_params = -cpu host,vendor=GenuineIntel
 
 [pmu]
 file = pmu.flat
-- 
2.31.1.498.g6c1eba8ee3d-goog
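For reference, and not part of the patch itself: a caller that wanted to
tolerate a rejected MSR rather than assert the fault could wrap the new
checking helpers along the lines below.  This is only an illustrative
sketch; the test_msr_rw_if_supported() name is hypothetical, and it assumes
exception_vector() reads back 0 when ASM_TRY() caught no exception.

/* Hypothetical: skip MSRs the vCPU model rejects instead of failing. */
static void test_msr_rw_if_supported(struct msr_info *msr)
{
	if (rdmsr_checking(msr->index) == GP_VECTOR)
		report_skip("%s is not supported", msr->name);
	else
		test_msr_rw(msr, msr->value);
}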