APM2 states that VMRUN and VMLOAD each canonicalize (i.e., sign-extend to bit 63) all base addresses in the segment registers that they load. Split TEST_CANONICAL in test_canonicalization into separate TEST_CANONICAL_VMRUN and TEST_CANONICAL_VMLOAD variants, and add the corresponding VMLOAD test for KERNEL_GS_BASE. Canonicalization now sign-extends from bit 47 or bit 56 (i.e., 48- or 57-bit virtual addresses) depending on LA57. Signed-off-by: Lara Lazier <laramglazier@xxxxxxxxx> --- lib/x86/processor.h | 4 +++- x86/svm_tests.c | 54 +++++++++++++++++++++++++++++++-------------- 2 files changed, 41 insertions(+), 17 deletions(-) diff --git a/lib/x86/processor.h b/lib/x86/processor.h index f4d1757..ae708ac 100644 --- a/lib/x86/processor.h +++ b/lib/x86/processor.h @@ -619,7 +619,9 @@ static inline void write_pkru(u32 pkru) static inline bool is_canonical(u64 addr) { - return (s64)(addr << 16) >> 16 == addr; + int shift_amt = (this_cpu_has(X86_FEATURE_LA57)) ? 7 /* 57 bits virtual */ + : 16 /* 48 bits virtual */; + return (s64)(addr << shift_amt) >> shift_amt == addr; } static inline void clear_bit(int bit, u8 *addr) diff --git a/x86/svm_tests.c b/x86/svm_tests.c index 7c7b19d..273b80b 100644 --- a/x86/svm_tests.c +++ b/x86/svm_tests.c @@ -2460,32 +2460,54 @@ static void test_msrpm_iopm_bitmap_addrs(void) vmcb->control.intercept = saved_intercept; } -#define TEST_CANONICAL(seg_base, msg) \ - saved_addr = seg_base; \ +#define TEST_CANONICAL_VMRUN(seg_base, msg) \ + saved_addr = seg_base; \ seg_base = (seg_base & ((1ul << addr_limit) - 1)) | noncanonical_mask; \ - report(svm_vmrun() == SVM_EXIT_VMMCALL, "Test %s.base for canonical form: %lx", msg, seg_base); \ + return_value = svm_vmrun(); \ + if (is_canonical(seg_base)) { \ + report(return_value == SVM_EXIT_VMMCALL, \ + "Test %s.base for canonical form: %lx", msg, seg_base); \ + } else { \ + report(false, \ + "Test a %s.base not canonicalized: %lx", msg, seg_base); \ + } \ + seg_base = saved_addr; + + +#define TEST_CANONICAL_VMLOAD(seg_base, msg) \ + saved_addr = seg_base; \ + seg_base = (seg_base & ((1ul << addr_limit) - 1)) 
| noncanonical_mask; \ + asm volatile ("vmload %0" : : "a"(vmcb_phys) : "memory"); \ + asm volatile ("vmsave %0" : : "a"(vmcb_phys) : "memory"); \ + report(is_canonical(seg_base), \ + "Test %s.base for canonical form: %lx", msg, seg_base); \ seg_base = saved_addr; /* * VMRUN canonicalizes (i.e., sign-extend to bit 63) all base addresses * in the segment registers that have been loaded. */ -static void test_vmrun_canonicalization(void) +static void test_canonicalization(void) { u64 saved_addr; - u8 addr_limit = cpuid_maxphyaddr(); + u64 return_value; + u64 addr_limit; + u64 vmcb_phys = virt_to_phys(vmcb); + + addr_limit = (this_cpu_has(X86_FEATURE_LA57)) ? 57 : 48; u64 noncanonical_mask = NONCANONICAL & ~((1ul << addr_limit) - 1); - TEST_CANONICAL(vmcb->save.es.base, "ES"); - TEST_CANONICAL(vmcb->save.cs.base, "CS"); - TEST_CANONICAL(vmcb->save.ss.base, "SS"); - TEST_CANONICAL(vmcb->save.ds.base, "DS"); - TEST_CANONICAL(vmcb->save.fs.base, "FS"); - TEST_CANONICAL(vmcb->save.gs.base, "GS"); - TEST_CANONICAL(vmcb->save.gdtr.base, "GDTR"); - TEST_CANONICAL(vmcb->save.ldtr.base, "LDTR"); - TEST_CANONICAL(vmcb->save.idtr.base, "IDTR"); - TEST_CANONICAL(vmcb->save.tr.base, "TR"); + TEST_CANONICAL_VMLOAD(vmcb->save.fs.base, "FS"); + TEST_CANONICAL_VMLOAD(vmcb->save.gs.base, "GS"); + TEST_CANONICAL_VMLOAD(vmcb->save.ldtr.base, "LDTR"); + TEST_CANONICAL_VMLOAD(vmcb->save.tr.base, "TR"); + TEST_CANONICAL_VMLOAD(vmcb->save.kernel_gs_base, "KERNEL GS"); + TEST_CANONICAL_VMRUN(vmcb->save.es.base, "ES"); + TEST_CANONICAL_VMRUN(vmcb->save.cs.base, "CS"); + TEST_CANONICAL_VMRUN(vmcb->save.ss.base, "SS"); + TEST_CANONICAL_VMRUN(vmcb->save.ds.base, "DS"); + TEST_CANONICAL_VMRUN(vmcb->save.gdtr.base, "GDTR"); + TEST_CANONICAL_VMRUN(vmcb->save.idtr.base, "IDTR"); } static void svm_guest_state_test(void) @@ -2497,7 +2519,7 @@ static void svm_guest_state_test(void) test_cr4(); test_dr(); test_msrpm_iopm_bitmap_addrs(); - test_vmrun_canonicalization(); + test_canonicalization(); } static 
void __svm_npt_rsvd_bits_test(u64 *pxe, u64 rsvd_bits, u64 efer, -- 2.25.1