If Intel/AMD implements MWAIT, we expect that it works well and we only
reject known bugs; there is no reason to do it the other way around for
minor vendors.  (Not that they are relevant ATM.)

This allows further simplification of kvm_mwait_in_guest().

Signed-off-by: Radim Krčmář <rkrcmar@xxxxxxxxxx>
---
 arch/x86/kvm/x86.h | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 8ea4e80c24d1..b49add75ea35 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -216,17 +216,9 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 
 static inline bool kvm_mwait_in_guest(void)
 {
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_MWAIT))
-		return false;
-
-	switch (boot_cpu_data.x86_vendor) {
-	case X86_VENDOR_AMD:
-		return !boot_cpu_has_bug(X86_BUG_AMD_E400);
-	case X86_VENDOR_INTEL:
-		return !boot_cpu_has_bug(X86_BUG_MONITOR);
-	default:
-		return false;
-	}
+	return boot_cpu_has(X86_FEATURE_MWAIT) &&
+		!boot_cpu_has_bug(X86_BUG_AMD_E400) &&
+		!boot_cpu_has_bug(X86_BUG_MONITOR);
 }
 
 #endif
-- 
2.12.2
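
For readers outside the kernel tree, here is a minimal standalone C sketch of the simplified check. It stubs the boot-CPU feature/bug predicates as plain booleans; the stub names and sample values are hypothetical, and only the X86_FEATURE_MWAIT, X86_BUG_AMD_E400 and X86_BUG_MONITOR identifiers come from the patch above.

/*
 * Standalone sketch (not kernel code): models the post-patch logic of
 * kvm_mwait_in_guest() with hypothetical stubbed predicates.
 */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's boot CPU feature/bug bits. */
static bool has_mwait    = true;   /* X86_FEATURE_MWAIT present  */
static bool bug_amd_e400 = false;  /* X86_BUG_AMD_E400 (AMD)     */
static bool bug_monitor  = false;  /* X86_BUG_MONITOR (Intel)    */

/*
 * New, vendor-agnostic form: allow MWAIT in the guest whenever the
 * feature exists and no known MONITOR/MWAIT bug has been flagged.
 */
static bool kvm_mwait_in_guest(void)
{
	return has_mwait && !bug_amd_e400 && !bug_monitor;
}

int main(void)
{
	printf("mwait in guest: %s\n", kvm_mwait_in_guest() ? "yes" : "no");
	return 0;
}

The key observation behind dropping the vendor switch is that each bug flag is only ever set by the respective vendor's init code, so checking both flags unconditionally should be equivalent to the old per-vendor dispatch.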