The CPU_BASED_VM_EXEC_CONTROL VMCS field is, in practice, write-only: the
processor never modifies it, so the value KVM last wrote is always the current
value.  We can therefore cache it in the vcpu structure and avoid a costly
vmcs_read32() every time we want to change a bit.

Signed-off-by: Avi Kivity <avi@xxxxxxxxxx>
---
 arch/x86/kvm/vmx.c |   60 +++++++++++++++++++++++++--------------------------
 1 files changed, 29 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f76137c..ee1cd1a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -130,6 +130,7 @@ struct vcpu_vmx {
 	u8                    fail;
 	u32                   exit_intr_info;
 	u32                   idt_vectoring_info;
+	u32                   cpu_based_vm_exec_control;
 	struct shared_msr_entry *guest_msrs;
 	int                   nmsrs;
 	int                   save_nmsrs;
@@ -643,6 +644,18 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
 	vmcs_writel(field, vmcs_readl(field) | mask);
 }
 
+static void set_cpu_based_vm_exec_ctrl_bits(struct vcpu_vmx *vmx, u32 mask)
+{
+	vmx->cpu_based_vm_exec_control |= mask;
+	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx->cpu_based_vm_exec_control);
+}
+
+static void clear_cpu_based_vm_exec_ctrl_bits(struct vcpu_vmx *vmx, u32 mask)
+{
+	vmx->cpu_based_vm_exec_control &= ~mask;
+	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx->cpu_based_vm_exec_control);
+}
+
 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
 	u32 eb;
@@ -1932,18 +1945,16 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
 		vmx_decache_cr3(vcpu);
 	if (!(cr0 & X86_CR0_PG)) {
 		/* From paging/starting to nonpaging */
-		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
-			     (CPU_BASED_CR3_LOAD_EXITING |
-			      CPU_BASED_CR3_STORE_EXITING));
+		set_cpu_based_vm_exec_ctrl_bits(to_vmx(vcpu),
+						CPU_BASED_CR3_LOAD_EXITING |
+						CPU_BASED_CR3_STORE_EXITING);
 		vcpu->arch.cr0 = cr0;
 		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
 	} else if (!is_paging(vcpu)) {
 		/* From nonpaging to paging */
-		vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
-			     vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
-			     ~(CPU_BASED_CR3_LOAD_EXITING |
-			       CPU_BASED_CR3_STORE_EXITING));
+		clear_cpu_based_vm_exec_ctrl_bits(to_vmx(vcpu),
+						  CPU_BASED_CR3_LOAD_EXITING |
+						  CPU_BASED_CR3_STORE_EXITING);
 		vcpu->arch.cr0 = cr0;
 		vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
 	}
@@ -2622,6 +2633,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		exec_control |= CPU_BASED_CR3_STORE_EXITING |
 				CPU_BASED_CR3_LOAD_EXITING  |
 				CPU_BASED_INVLPG_EXITING;
+	vmx->cpu_based_vm_exec_control = exec_control;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
 
 	if (cpu_has_secondary_exec_ctrls()) {
@@ -2873,17 +2885,12 @@ out:
 
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
-	u32 cpu_based_vm_exec_control;
-
-	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+	set_cpu_based_vm_exec_ctrl_bits(to_vmx(vcpu),
+					CPU_BASED_VIRTUAL_INTR_PENDING);
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
-	u32 cpu_based_vm_exec_control;
-
 	if (!cpu_has_virtual_nmis()) {
 		enable_irq_window(vcpu);
 		return;
@@ -2893,9 +2900,8 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 		enable_irq_window(vcpu);
 		return;
 	}
-	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+	set_cpu_based_vm_exec_ctrl_bits(to_vmx(vcpu),
+					CPU_BASED_VIRTUAL_NMI_PENDING);
 }
 
 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
@@ -3408,13 +3414,9 @@ static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
 
 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 {
-	u32 cpu_based_vm_exec_control;
-
 	/* clear pending irq */
-	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
-
+	clear_cpu_based_vm_exec_ctrl_bits(to_vmx(vcpu),
+					  CPU_BASED_VIRTUAL_INTR_PENDING);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	++vcpu->stat.irq_window_exits;
@@ -3671,12 +3673,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 
 static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
-	u32 cpu_based_vm_exec_control;
-
-	/* clear pending NMI */
-	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
-	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
-	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+	clear_cpu_based_vm_exec_ctrl_bits(to_vmx(vcpu),
+					  CPU_BASED_VIRTUAL_NMI_PENDING);
 	++vcpu->stat.nmi_window_exits;
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
@@ -3691,7 +3689,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 	u32 cpu_exec_ctrl;
 	bool intr_window_requested;
 
-	cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
+	cpu_exec_ctrl = vmx->cpu_based_vm_exec_control;
 	intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
 
 	while (!guest_state_valid(vcpu)) {
-- 
1.7.1
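
For readers less familiar with the pattern, here is a minimal, standalone
sketch of the shadow-copy idea the patch applies: software keeps a cached copy
of a control word that it alone modifies, performs every read-modify-write on
the cache, and pushes the result to hardware, so the expensive hardware read
is never needed.  Everything below (hw_write32(), struct vcpu, the field and
bit constants) is hypothetical illustration code, not KVM code; the bit values
merely mirror the interrupt-window and NMI-window controls touched above.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the expensive hardware accessor (vmcs_write32() in KVM). */
static void hw_write32(uint32_t field, uint32_t value)
{
	printf("hw write: field %#x <- %#010x\n", field, value);
}

/* Hypothetical per-vcpu state holding the cached control word. */
struct vcpu {
	uint32_t exec_control;	/* software is the only writer */
};

#define FIELD_EXEC_CONTROL	0x4002		/* illustrative field encoding */
#define CTRL_INTR_WINDOW	(1u << 2)	/* "interrupt-window exiting" */
#define CTRL_NMI_WINDOW		(1u << 22)	/* "NMI-window exiting" */

/* Read-modify-write goes through the cache; no hardware read required. */
static void exec_control_set_bits(struct vcpu *v, uint32_t mask)
{
	v->exec_control |= mask;
	hw_write32(FIELD_EXEC_CONTROL, v->exec_control);
}

static void exec_control_clear_bits(struct vcpu *v, uint32_t mask)
{
	v->exec_control &= ~mask;
	hw_write32(FIELD_EXEC_CONTROL, v->exec_control);
}

int main(void)
{
	struct vcpu v = { .exec_control = 0 };

	exec_control_set_bits(&v, CTRL_INTR_WINDOW);	/* request an IRQ window */
	exec_control_clear_bits(&v, CTRL_INTR_WINDOW);	/* window exit handled */
	exec_control_set_bits(&v, CTRL_NMI_WINDOW);	/* request an NMI window */
	return 0;
}

The invariant that makes this safe is the one stated in the changelog:
hardware never changes the field, so the cached copy can never go stale.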