If the guest fpu is loaded, there is nothing interesting about cr0.ts;
let the guest play with it as it will.  This makes context switches
between fpu-intensive guest processes faster, as we won't trap the clts
and cr0 write instructions.

Signed-off-by: Avi Kivity <avi@xxxxxxxxxx>
---
 arch/x86/kvm/vmx.c |   11 +++++++++++
 1 files changed, 11 insertions(+), 0 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7e0b45e..81dc432 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -797,12 +797,23 @@ static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 	if (kvm_read_cr0_bits(vcpu, X86_CR0_TS))
 		vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
+	vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
 }
 
 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
+	ulong old_ts, old_cr0;
+
+	old_ts = kvm_read_cr0_bits(vcpu, X86_CR0_TS);
 	vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
 	update_exception_bitmap(vcpu);
+	vcpu->arch.cr0_guest_owned_bits = 0;
+	vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+	old_cr0 = vcpu->arch.cr0;
+	vcpu->arch.cr0 = (vcpu->arch.cr0 & ~X86_CR0_TS) | old_ts;
+	if (vcpu->arch.cr0 != old_cr0)
+		vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
 }
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
-- 
1.6.5.3
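
For reference, a rough standalone sketch (not part of the patch; the helper
name below is made up) of the CR0_GUEST_HOST_MASK split the patch relies on:
bits cleared in the mask are guest-owned and are read straight from
GUEST_CR0 without a vmexit, while host-owned bits are satisfied from
CR0_READ_SHADOW.  With cr0_guest_owned_bits == X86_CR0_TS, the guest's clts
and cr0.ts writes therefore never leave guest mode:

/* Standalone sketch -- effective_guest_cr0() is a hypothetical helper,
 * not a KVM function; it only illustrates the VMX cr0 read semantics. */
#include <stdio.h>

#define X86_CR0_TS	(1UL << 3)

/* Value the guest observes when it reads cr0: guest-owned bits come
 * from the hardware GUEST_CR0 field, host-owned bits from the shadow. */
static unsigned long effective_guest_cr0(unsigned long guest_cr0,
					 unsigned long read_shadow,
					 unsigned long guest_owned_bits)
{
	return (guest_cr0 & guest_owned_bits) |
	       (read_shadow & ~guest_owned_bits);
}

int main(void)
{
	unsigned long guest_cr0 = X86_CR0_TS;	/* hardware GUEST_CR0 has TS set */
	unsigned long read_shadow = 0;		/* read shadow says TS is clear */

	/* TS guest-owned: the guest sees the real bit, no exit needed. */
	printf("guest-owned: %#lx\n",
	       effective_guest_cr0(guest_cr0, read_shadow, X86_CR0_TS));

	/* TS host-owned: the guest sees the shadow value instead. */
	printf("host-owned:  %#lx\n",
	       effective_guest_cr0(guest_cr0, read_shadow, 0));
	return 0;
}

This is also why vmx_fpu_deactivate() folds the current hardware TS value
back into vcpu->arch.cr0 and refreshes CR0_READ_SHADOW before reclaiming
ownership: once TS becomes host-owned again, the shadow has to match
whatever the guest last saw.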