On Thu, Sep 22, 2022, Vitaly Kuznetsov wrote:
> diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
> index d4bd18bc580d..18b44450dfb8 100644
> --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
> +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
> @@ -46,20 +46,33 @@ struct hcall_data {
> 
>  static void guest_msr(struct msr_data *msr)
>  {
> -	uint64_t ignored;
> +	uint64_t msr_val = 0;
>  	uint8_t vector;
> 
>  	GUEST_ASSERT(msr->idx);
> 
> -	if (!msr->write)
> -		vector = rdmsr_safe(msr->idx, &ignored);
> -	else
> +	if (!msr->write) {
> +		vector = rdmsr_safe(msr->idx, &msr_val);

This is subtly going to do weird things if the RDMSR faults.  rdmsr_safe()
overwrites @val with whatever happens to be in EDX:EAX if the RDMSR faults,
i.e. this may yield garbage instead of '0'.

Arguably rdmsr_safe() is a bad API, but at the same time the caller really
shouldn't consume the result if RDMSR faults (though aligning with the
kernel is also valuable).

Aha!  Idea.  Assuming none of the MSRs are write-only, what about adding a
prep patch to rework this code so that it verifies RDMSR returns what was
written when a fault didn't occur?

	uint8_t vector = 0;
	uint64_t msr_val;

	GUEST_ASSERT(msr->idx);

	if (msr->write)
		vector = wrmsr_safe(msr->idx, msr->write_val);

	if (!vector)
		vector = rdmsr_safe(msr->idx, &msr_val);

	if (msr->fault_expected)
		GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector);
	else
		GUEST_ASSERT_2(!vector, msr->idx, vector);

	if (vector)
		goto done;

	GUEST_ASSERT_2(msr_val == msr->write_val, msr_val, msr->write_val);

done:
	GUEST_DONE();

and then this patch can just slot in the extra check:

	uint8_t vector = 0;
	uint64_t msr_val;

	GUEST_ASSERT(msr->idx);

	if (msr->write)
		vector = wrmsr_safe(msr->idx, msr->write_val);

	if (!vector)
		vector = rdmsr_safe(msr->idx, &msr_val);

	if (msr->fault_expected)
		GUEST_ASSERT_2(vector == GP_VECTOR, msr->idx, vector);
	else
		GUEST_ASSERT_2(!vector, msr->idx, vector);

	if (vector)
		goto done;

	GUEST_ASSERT_2(msr_val == msr->write_val, msr_val, msr->write_val);

	/* Invariant TSC bit appears when TSC invariant control MSR is written to */
	if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
		if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));
		else
			GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
				     !!(msr_val & HV_INVARIANT_TSC_EXPOSED));
	}

done:
	GUEST_DONE();
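
FWIW, the hazard is easy to see from the shape of the helper.  The snippet
below is a standalone mock, not the actual selftests rdmsr_safe()
implementation (mock_rdmsr_safe() and its fixed "stale register" value are
made up for illustration): because the output pointer is written
unconditionally, initializing msr_val to '0' at the call site buys nothing
once a fault occurs, and the only valid gate for consuming msr_val is the
returned vector.

	#include <stdint.h>
	#include <stdio.h>

	#define GP_VECTOR	13

	/*
	 * Mock of an rdmsr_safe()-style API (NOT the selftests code): *val is
	 * written unconditionally, so a faulting "read" leaves *val holding
	 * whatever junk was in the output registers, not '0'.
	 */
	static uint8_t mock_rdmsr_safe(int fault, uint64_t *val)
	{
		uint64_t edx_eax = 0xdeadbeefcafef00dull;	/* stale register state */

		*val = edx_eax;					/* stored even on fault */
		return fault ? GP_VECTOR : 0;
	}

	int main(void)
	{
		uint64_t msr_val = 0;
		uint8_t vector = mock_rdmsr_safe(1, &msr_val);

		/* msr_val is garbage here; only 'vector' says whether it's valid. */
		printf("vector = %u, msr_val = 0x%llx\n", vector,
		       (unsigned long long)msr_val);
		return 0;
	}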