The following patch implements a simple busy-spin detector. It considers
a vcpu as busy-spinning if there are two consecutive exits due to external
interrupt on the same RIP, and sleeps for 100us in that case. It is very
likely that if the vcpu is making progress it will either exit for other
reasons or change RIP.

The percentage numbers below represent improvement in kernel build time
in comparison with mainline (RHEL 5.4 guest).

make -j16, 8 cpu host:
smp 16: 3%
smp 18: 10%
smp 32: 14%

smp 4, make -j4, pinned to 2 cpus: 4%
smp 8, make -j8, pinned to 2 cpus: 5%

Index: kvm/arch/x86/include/asm/kvm_host.h
===================================================================
--- kvm.orig/arch/x86/include/asm/kvm_host.h
+++ kvm/arch/x86/include/asm/kvm_host.h
@@ -301,6 +301,8 @@ struct kvm_vcpu_arch {
 		unsigned long mmu_seq;
 	} update_pte;
 
+	unsigned long last_rip;
+
 	struct fpu guest_fpu;
 
 	gva_t mmio_fault_cr2;
@@ -653,6 +655,8 @@ void kvm_disable_tdp(void);
 int complete_pio(struct kvm_vcpu *vcpu);
 bool kvm_check_iopl(struct kvm_vcpu *vcpu);
 
+void kvm_detect_spin(struct kvm_vcpu *vcpu);
+
 struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn);
 
 static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
Index: kvm/arch/x86/kvm/svm.c
===================================================================
--- kvm.orig/arch/x86/kvm/svm.c
+++ kvm/arch/x86/kvm/svm.c
@@ -1558,8 +1558,10 @@ static int nmi_interception(struct vcpu_
 
 static int intr_interception(struct vcpu_svm *svm)
 {
+	if (!svm_has(SVM_FEATURE_PAUSE_FILTER))
+		kvm_detect_spin(&svm->vcpu);
 	++svm->vcpu.stat.irq_exits;
-	return 1;
+	return 2;
 }
 
 static int nop_on_interception(struct vcpu_svm *svm)
Index: kvm/arch/x86/kvm/vmx.c
===================================================================
--- kvm.orig/arch/x86/kvm/vmx.c
+++ kvm/arch/x86/kvm/vmx.c
@@ -3116,7 +3116,9 @@ static int handle_exception(struct kvm_v
 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.irq_exits;
-	return 1;
+	if (!cpu_has_vmx_ple())
+		kvm_detect_spin(vcpu);
+	return 2;
 }
 
 static int handle_triple_fault(struct kvm_vcpu *vcpu)
Index: kvm/arch/x86/kvm/x86.c
===================================================================
--- kvm.orig/arch/x86/kvm/x86.c
+++ kvm/arch/x86/kvm/x86.c
@@ -4523,6 +4523,17 @@ static void inject_pending_event(struct
 	}
 }
 
+void kvm_detect_spin(struct kvm_vcpu *vcpu)
+{
+	unsigned long rip = kvm_rip_read(vcpu);
+
+	if (vcpu->arch.last_rip == rip)
+		kvm_vcpu_on_spin(vcpu);
+
+	vcpu->arch.last_rip = rip;
+}
+EXPORT_SYMBOL_GPL(kvm_detect_spin);
+
 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 {
 	int r;
@@ -4654,6 +4665,8 @@ static int vcpu_enter_guest(struct kvm_v
 	kvm_lapic_sync_from_vapic(vcpu);
 
 	r = kvm_x86_ops->handle_exit(vcpu);
+	if (r == 1)
+		vcpu->arch.last_rip = ~(0UL);
 out:
 	return r;
 }
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html