Re: [PATCH v3] KVM: VMX: enable acknowledge interrupt on vmexit

On Mon, Jan 28, 2013 at 08:54:07AM +0800, Yang Zhang wrote:
> From: Yang Zhang <yang.z.zhang@xxxxxxxxx>
> 
> The "acknowledge interrupt on exit" feature controls processor behavior
> for external interrupt acknowledgement. When this control is set, the
> processor acknowledges the interrupt controller to acquire the
> interrupt vector on VM exit.
> 
> With this feature enabled, an interrupt that arrives while the target cpu is
> running in vmx non-root mode is handled by the vmx exit handler instead of the
> handler installed in the host idt. For now, the vmx handler only fakes an
> interrupt stack frame and jumps through the idt so that the real handler still
> handles it. Later, we will recognize the vector and deliver only interrupts that
> do not belong to the current vcpu through the idt; interrupts that belong to the
> current vcpu will be handled inside the vmx handler. This will reduce KVM's
> interrupt handling cost.
> 
> Refer to Intel SDM volume 3, chapter 33.2.
> 
> Signed-off-by: Yang Zhang <yang.z.zhang@xxxxxxxxx>
> ---
>  arch/x86/include/asm/kvm_host.h |    2 +
>  arch/x86/kvm/svm.c              |    6 ++++
>  arch/x86/kvm/vmx.c              |   61 ++++++++++++++++++++++++++++++++++++--
>  arch/x86/kvm/x86.c              |    3 +-
>  4 files changed, 67 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 77d56a4..07daf10 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -340,6 +340,7 @@ struct kvm_vcpu_arch {
>  	unsigned long cr8;
>  	u32 hflags;
>  	u64 efer;
> +	unsigned long host_idt_base;
Should be in vmx specific state (struct vcpu_vmx), not in the generic kvm_vcpu_arch.
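I.e. something like this (just a sketch of what I mean, not compile tested).
Nothing outside vmx.c needs the value, so keep it in struct vcpu_vmx and reach
it through to_vmx():

	/* vmx.c -- sketch only */
	struct vcpu_vmx {
		struct kvm_vcpu		vcpu;
		/* ... existing fields ... */
		unsigned long		host_idt_base;
	};

	static void vmx_set_constant_host_state(struct kvm_vcpu *vcpu)
	{
		struct vcpu_vmx *vmx = to_vmx(vcpu);
		struct desc_ptr dt;

		/* ... */
		native_store_idt(&dt);
		vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
		vmx->host_idt_base = dt.address;
		/* ... */
	}

vmx_handle_external_intr() would then use to_vmx(vcpu)->host_idt_base instead
of vcpu->arch.host_idt_base.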

>  	u64 apic_base;
>  	struct kvm_lapic *apic;    /* kernel irqchip context */
>  	unsigned long apic_attention;
> @@ -725,6 +726,7 @@ struct kvm_x86_ops {
>  	int (*check_intercept)(struct kvm_vcpu *vcpu,
>  			       struct x86_instruction_info *info,
>  			       enum x86_intercept_stage stage);
> +	void (*handle_external_intr)(struct kvm_vcpu *vcpu);
>  };
>  
>  struct kvm_arch_async_pf {
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index d29d3cd..c283185 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -4227,6 +4227,11 @@ out:
>  	return ret;
>  }
>  
> +static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
> +{
> +	local_irq_enable();
> +}
> +
>  static struct kvm_x86_ops svm_x86_ops = {
>  	.cpu_has_kvm_support = has_svm,
>  	.disabled_by_bios = is_disabled,
> @@ -4318,6 +4323,7 @@ static struct kvm_x86_ops svm_x86_ops = {
>  	.set_tdp_cr3 = set_tdp_cr3,
>  
>  	.check_intercept = svm_check_intercept,
> +	.handle_external_intr = svm_handle_external_intr,
>  };
>  
>  static int __init svm_init(void)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index 02eeba8..243ce45 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -2565,7 +2565,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
>  #ifdef CONFIG_X86_64
>  	min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
>  #endif
> -	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
> +	opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
> +		VM_EXIT_ACK_INTR_ON_EXIT;
>  	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
>  				&_vmexit_control) < 0)
>  		return -EIO;
> @@ -3742,7 +3743,7 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
>   * Note that host-state that does change is set elsewhere. E.g., host-state
>   * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
>   */
> -static void vmx_set_constant_host_state(void)
> +static void vmx_set_constant_host_state(struct kvm_vcpu *vcpu)
>  {
>  	u32 low32, high32;
>  	unsigned long tmpl;
> @@ -3770,6 +3771,7 @@ static void vmx_set_constant_host_state(void)
>  
>  	native_store_idt(&dt);
>  	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
> +	vcpu->arch.host_idt_base = dt.address;
>  
>  	vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
>  
> @@ -3884,7 +3886,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
>  
>  	vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
>  	vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
> -	vmx_set_constant_host_state();
> +	vmx_set_constant_host_state(&vmx->vcpu);
>  #ifdef CONFIG_X86_64
>  	rdmsrl(MSR_FS_BASE, a);
>  	vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
> @@ -6094,6 +6096,56 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
>  	}
>  }
>  
> +static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
> +{
> +	u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
> +	if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
> +			== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
> +		unsigned int vector;
> +		unsigned long entry;
> +		gate_desc *desc;
> +
> +		vector =  exit_intr_info & INTR_INFO_VECTOR_MASK;
> +#ifdef CONFIG_X86_64
> +		desc = (void *)vcpu->arch.host_idt_base + vector * 16;
> +#else
> +		desc = (void *)vcpu->arch.host_idt_base + vector * 8;
> +#endif
> +
> +		entry = gate_offset(*desc);
> +		asm(
> +			"mov %0, %%" _ASM_DX " \n\t"
> +#ifdef CONFIG_X86_64
> +			"mov %%" _ASM_SP ", %%" _ASM_BX " \n\t"
> +			"and $0xfffffffffffffff0, %%" _ASM_SP " \n\t"
> +			"mov %%ss, %%" _ASM_AX " \n\t"
> +			"push %%" _ASM_AX " \n\t"
> +			"push %%" _ASM_BX " \n\t"
> +#endif
> +			"pushf \n\t"
> +			"pop %%" _ASM_AX " \n\t"
> +			"or $0x200, %%" _ASM_AX " \n\t"
> +			"push %%" _ASM_AX " \n\t"
> +			"mov %%cs, %%" _ASM_AX " \n\t"
> +			"push %%" _ASM_AX " \n\t"
> +			"push intr_return \n\t"
> +			"jmp *%%" _ASM_DX " \n\t"
> +			"1: \n\t"
> +			".pushsection .rodata \n\t"
> +			".global intr_return \n\t"
> +			"intr_return: " _ASM_PTR " 1b \n\t"
> +			".popsection \n\t"
> +			: : "m"(entry) :
> +#ifdef CONFIG_X86_64
> +			"rax", "rbx", "rdx"
> +#else
> +			"eax", "edx"
> +#endif
> +			);
> +	} else
> +		local_irq_enable();
> +}
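A comment above the asm block describing the frame it builds would also help
readers; something along these lines (just a sketch, adjust as you see fit):

	/*
	 * Build the frame the CPU would push for a real external interrupt
	 * and jump to the host IDT entry for the vector:
	 *
	 *   SS                  (64-bit only)
	 *   old RSP             (64-bit only; pushed after aligning the
	 *                        stack to 16 bytes)
	 *   RFLAGS with IF set
	 *   CS
	 *   return address      (intr_return, i.e. label 1)
	 *
	 * The host handler finishes with iret, which pops this frame; since
	 * the saved RFLAGS has IF set, interrupts are enabled from then on.
	 */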
> +
>  static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
>  {
>  	u32 exit_intr_info;
> @@ -6764,7 +6816,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
>  	 * Other fields are different per CPU, and will be set later when
>  	 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
>  	 */
> -	vmx_set_constant_host_state();
> +	vmx_set_constant_host_state(vcpu);
>  
>  	/*
>  	 * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
> @@ -7361,6 +7413,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
>  	.set_tdp_cr3 = vmx_set_cr3,
>  
>  	.check_intercept = vmx_check_intercept,
> +	.handle_external_intr = vmx_handle_external_intr,
>  };
>  
>  static int __init vmx_init(void)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index b9f5529..676b399 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -5767,7 +5767,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
>  
>  	vcpu->mode = OUTSIDE_GUEST_MODE;
>  	smp_wmb();
> -	local_irq_enable();
> +
I asked you to add a comment here explaining how interrupts get enabled.
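Something along these lines would do (wording is up to you):

	vcpu->mode = OUTSIDE_GUEST_MODE;
	smp_wmb();

	/*
	 * Interrupts are still disabled at this point; it is
	 * handle_external_intr()'s job to enable them.  On VMX with
	 * "ack interrupt on exit" the pending vector is dispatched directly
	 * through the host IDT on a faked interrupt frame whose saved
	 * RFLAGS has IF set, so the handler's iret re-enables interrupts.
	 * In every other case (SVM, or an exit that was not caused by an
	 * external interrupt) it simply calls local_irq_enable().
	 */
	kvm_x86_ops->handle_external_intr(vcpu);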

> +	kvm_x86_ops->handle_external_intr(vcpu);
>  
>  	++vcpu->stat.exits;
>  
> -- 
> 1.7.1

--
			Gleb.

