From: Like Xu <likexu@xxxxxxxxxxx> Intel SDM says the CPUID.0xd.EBX reports the maximum size required by enabled features in XCR0. Add a simple test that writes two different non-#GP values via __xsetbv() and verifies that the CPUID data is updated. Opportunistically, move the __x{s,g}etbv helpers to x86_64/processor.h. Signed-off-by: Like Xu <likexu@xxxxxxxxxxx> --- Related link: https://lore.kernel.org/kvm/20220119070427.33801-1-likexu@xxxxxxxxxxx/ .../selftests/kvm/include/x86_64/processor.h | 18 ++++++++++ tools/testing/selftests/kvm/x86_64/amx_test.c | 18 ---------- .../testing/selftests/kvm/x86_64/cpuid_test.c | 34 +++++++++++++++++-- 3 files changed, 49 insertions(+), 21 deletions(-) diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 122447827954..65097ca6d7b2 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -296,6 +296,24 @@ static inline void cpuid(uint32_t *eax, uint32_t *ebx, : "memory"); } +static inline u64 __xgetbv(u32 index) +{ + u32 eax, edx; + + asm volatile("xgetbv;" + : "=a" (eax), "=d" (edx) + : "c" (index)); + return eax + ((u64)edx << 32); +} + +static inline void __xsetbv(u32 index, u64 value) +{ + u32 eax = value; + u32 edx = value >> 32; + + asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index)); +} + #define SET_XMM(__var, __xmm) \ asm volatile("movq %0, %%"#__xmm : : "r"(__var) : #__xmm) diff --git a/tools/testing/selftests/kvm/x86_64/amx_test.c b/tools/testing/selftests/kvm/x86_64/amx_test.c index 523c1e99ed64..c3cbb2dc450d 100644 --- a/tools/testing/selftests/kvm/x86_64/amx_test.c +++ b/tools/testing/selftests/kvm/x86_64/amx_test.c @@ -78,24 +78,6 @@ struct xtile_info { static struct xtile_info xtile; -static inline u64 __xgetbv(u32 index) -{ - u32 eax, edx; - - asm volatile("xgetbv;" - : "=a" (eax), "=d" (edx) - : "c" (index)); - return eax + ((u64)edx << 32); -} - 
-static inline void __xsetbv(u32 index, u64 value) -{ - u32 eax = value; - u32 edx = value >> 32; - - asm volatile("xsetbv" :: "a" (eax), "d" (edx), "c" (index)); -} - static inline void __ldtilecfg(void *cfg) { asm volatile(".byte 0xc4,0xe2,0x78,0x49,0x00" diff --git a/tools/testing/selftests/kvm/x86_64/cpuid_test.c b/tools/testing/selftests/kvm/x86_64/cpuid_test.c index 16d2465c5634..169ec54a928c 100644 --- a/tools/testing/selftests/kvm/x86_64/cpuid_test.c +++ b/tools/testing/selftests/kvm/x86_64/cpuid_test.c @@ -20,8 +20,7 @@ struct { u32 index; } mangled_cpuids[] = { /* - * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR, - * which are not controlled for by this test. + * These entries depend on the vCPU's XCR0 register and IA32_XSS MSR. */ {.function = 0xd, .index = 0}, {.function = 0xd, .index = 1}, @@ -55,6 +54,31 @@ static void test_cpuid_40000000(struct kvm_cpuid2 *guest_cpuid) GUEST_ASSERT(eax == 0x40000001); } +static void test_cpuid_d(struct kvm_cpuid2 *guest_cpuid) +{ + uint64_t cr4; + u32 eax, ebx, ecx, edx; + u32 before, after; + + cr4 = get_cr4(); + cr4 |= X86_CR4_OSXSAVE; + set_cr4(cr4); + + __xsetbv(0x0, 0x1); + eax = 0xd; + ebx = ecx = edx = 0; + cpuid(&eax, &ebx, &ecx, &edx); + before = ebx; + + __xsetbv(0x0, 0x3); + eax = 0xd; + ebx = ecx = edx = 0; + cpuid(&eax, &ebx, &ecx, &edx); + after = ebx; + + GUEST_ASSERT(before != after); +} + static void guest_main(struct kvm_cpuid2 *guest_cpuid) { GUEST_SYNC(1); @@ -65,6 +89,10 @@ static void guest_main(struct kvm_cpuid2 *guest_cpuid) test_cpuid_40000000(guest_cpuid); + GUEST_SYNC(3); + + test_cpuid_d(guest_cpuid); + GUEST_DONE(); } @@ -200,7 +228,7 @@ int main(void) vcpu_args_set(vm, VCPU_ID, 1, cpuid_gva); - for (stage = 0; stage < 3; stage++) + for (stage = 0; stage < 4; stage++) run_vcpu(vm, VCPU_ID, stage); set_cpuid_after_run(vm, cpuid2); -- 2.33.1