Now that KVM disallows disabling HLT-exiting after vCPUs have been
created, i.e. now that it's impossible for kvm_hlt_in_guest() to change
while vCPUs are running, apply KVM's PV_UNHALT quirk only when userspace
is setting guest CPUID.  Opportunistically rename the helper to make it
clear that KVM's behavior is a quirk that should never have been added.

KVM's documentation explicitly states that userspace should not advertise
PV_UNHALT if HLT-exiting is disabled, but for unknown reasons, commit
caa057a2cad6 ("KVM: X86: Provide a capability to disable HLT intercepts")
didn't stop at documenting the requirement and also massaged the incoming
guest CPUID.  Unfortunately, it's quite likely that userspace has come to
rely on KVM's behavior, i.e. the code can't simply be deleted.

The only reason KVM doesn't have an "official" quirk is that there is no
known use case where disabling the quirk would make sense, i.e. letting
userspace disable the quirk would further increase KVM's burden without
any benefit.

Reviewed-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/cpuid.c | 28 +++++++++++-----------------
 1 file changed, 11 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index e8c30de2faa9..3ba0e6a67823 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -157,6 +157,8 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
 	return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
 }
 
+static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu);
+
 /* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
 static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
 				 int nent)
@@ -176,6 +178,7 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
 	 * Note! @e2 and @nent track the _old_ CPUID entries!
 	 */
 	kvm_update_cpuid_runtime(vcpu);
+	kvm_apply_cpuid_pv_features_quirk(vcpu);
 
 	if (nent != vcpu->arch.cpuid_nent)
 		return -EINVAL;
@@ -246,18 +249,17 @@ static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcp
 				       vcpu->arch.cpuid_nent, base);
 }
 
-static void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
+static u32 kvm_apply_cpuid_pv_features_quirk(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best = kvm_find_kvm_cpuid_features(vcpu);
 
-	vcpu->arch.pv_cpuid.features = 0;
+	if (!best)
+		return 0;
 
-	/*
-	 * save the feature bitmap to avoid cpuid lookup for every PV
-	 * operation
-	 */
-	if (best)
-		vcpu->arch.pv_cpuid.features = best->eax;
+	if (kvm_hlt_in_guest(vcpu->kvm))
+		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
+
+	return best->eax;
 }
 
 /*
@@ -279,7 +281,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
 				       int nent)
 {
 	struct kvm_cpuid_entry2 *best;
-	struct kvm_hypervisor_cpuid kvm_cpuid;
 
 	best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 	if (best) {
@@ -306,13 +307,6 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
 		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
-	kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
-	if (kvm_cpuid.base) {
-		best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
-		if (kvm_hlt_in_guest(vcpu->kvm) && best)
-			best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
-	}
-
 	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
 		best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
 		if (best)
@@ -396,7 +390,7 @@ void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.guest_supported_xcr0 =
 		cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
 
-	kvm_update_pv_runtime(vcpu);
+	vcpu->arch.pv_cpuid.features = kvm_apply_cpuid_pv_features_quirk(vcpu);
 
 	vcpu->arch.is_amd_compatible = guest_cpuid_is_amd_or_hygon(vcpu);
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
-- 
2.47.0.338.g60cca15819-goog
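
For reference, below is a minimal userspace-side sketch of the documented
contract, i.e. what a VMM is expected to do instead of relying on the quirk:
strip PV_UNHALT from the CPUID it passes to KVM_SET_CPUID2 whenever it
disables HLT exits.  This is purely illustrative and not part of the patch;
the helper name, the vm_fd/vcpu_fd/cpuid setup, and the assumption of the
default KVM CPUID base (0x40000001 for KVM_CPUID_FEATURES) are hypothetical,
and error handling is omitted.

#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>

static void disable_hlt_exits_and_set_cpuid(int vm_fd, int vcpu_fd,
					    struct kvm_cpuid2 *cpuid)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_X86_DISABLE_EXITS,
		.args[0] = KVM_X86_DISABLE_EXITS_HLT,
	};
	unsigned int i;

	/* Must be done before any vCPU is created (enforced by this series). */
	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);

	/* Clear PV_UNHALT in the KVM PV features leaf before setting CPUID. */
	for (i = 0; i < cpuid->nent; i++) {
		if (cpuid->entries[i].function == KVM_CPUID_FEATURES)
			cpuid->entries[i].eax &= ~(1u << KVM_FEATURE_PV_UNHALT);
	}

	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);
}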