Re: [PATCH v8 9/9] KVM: VMX: enable IPI virtualization

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On Mon, Apr 11, 2022, Zeng Guang wrote:
> @@ -4194,15 +4199,19 @@ static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
>  	struct vcpu_vmx *vmx = to_vmx(vcpu);
>  
>  	pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
> -	if (cpu_has_secondary_exec_ctrls()) {
> -		if (kvm_vcpu_apicv_active(vcpu))
> -			secondary_exec_controls_setbit(vmx,
> -				      SECONDARY_EXEC_APIC_REGISTER_VIRT |
> -				      SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
> -		else
> -			secondary_exec_controls_clearbit(vmx,
> -					SECONDARY_EXEC_APIC_REGISTER_VIRT |
> -					SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
> +
> +	if (kvm_vcpu_apicv_active(vcpu)) {
> +		secondary_exec_controls_setbit(vmx,
> +			      SECONDARY_EXEC_APIC_REGISTER_VIRT |
> +			      SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
> +		if (enable_ipiv)
> +			tertiary_exec_controls_setbit(vmx, TERTIARY_EXEC_IPI_VIRT);
> +	} else {
> +		secondary_exec_controls_clearbit(vmx,
> +			      SECONDARY_EXEC_APIC_REGISTER_VIRT |
> +			      SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);

Thanks for doing this, but can you move it to a separate patch?  Just in case
we're missing something and this somehow explodes.

> +		if (enable_ipiv)
> +			tertiary_exec_controls_clearbit(vmx, TERTIARY_EXEC_IPI_VIRT);
>  	}
>  
>  	vmx_update_msr_bitmap_x2apic(vcpu);
> @@ -4236,7 +4245,16 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
>  
>  static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
>  {
> -	return vmcs_config.cpu_based_3rd_exec_ctrl;
> +	u64 exec_control = vmcs_config.cpu_based_3rd_exec_ctrl;
> +
> +	/*
> +	 * IPI virtualization relies on APICv. Disable IPI virtualization if
> +	 * APICv is inhibited.
> +	 */
> +	if (!enable_ipiv || !kvm_vcpu_apicv_active(&vmx->vcpu))
> +		exec_control &= ~TERTIARY_EXEC_IPI_VIRT;
> +
> +	return exec_control;
>  }
>  
>  /*
> @@ -4384,10 +4402,37 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
>  	return exec_control;
>  }
>  
> +int vmx_get_pid_table_order(struct kvm_vmx *kvm_vmx)
> +{
> +	return get_order(kvm_vmx->kvm.arch.max_vcpu_ids * sizeof(*kvm_vmx->pid_table));

I think it's slightly less gross to take @kvm and then:

	return get_order(kvm->arch.max_vcpu_ids * sizeof(*to_kvm_vmx(kvm)->pid_table));

> +}
> +
> +static int vmx_alloc_ipiv_pid_table(struct kvm *kvm)
> +{
> +	struct page *pages;
> +	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
> +
> +	if (!irqchip_in_kernel(kvm) || !enable_ipiv)
> +		return 0;

Newline here please.

> +	if (kvm_vmx->pid_table)

Note, this check goes away if this ends up being called from a dedicated ioctl.

> +		return 0;
> +
> +	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO,
> +			    vmx_get_pid_table_order(kvm_vmx));
> +

But no newline here please :-)

> +	if (!pages)
> +		return -ENOMEM;
> +
> +	kvm_vmx->pid_table = (void *)page_address(pages);
> +	return 0;
> +}
> +
>  #define VMX_XSS_EXIT_BITMAP 0
>  
>  static void init_vmcs(struct vcpu_vmx *vmx)
>  {
> +	struct kvm_vmx *kvm_vmx = to_kvm_vmx(vmx->vcpu.kvm);

Might be worth doing:

	struct kvm *kvm = vmx->vcpu.kvm;
	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);

The kvm_vmx->kvm.arch below is kinda funky.

Ah yeah, do that, then e.g. the kvm_pause_in_guest() call doesn't need to get
'kvm' itself.

> +
>  	if (nested)
>  		nested_vmx_set_vmcs_shadowing_bitmap();
>  
> @@ -4419,6 +4464,11 @@ static void init_vmcs(struct vcpu_vmx *vmx)
>  		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
>  	}
>  
> +	if (vmx_can_use_ipiv(&vmx->vcpu)) {
> +		vmcs_write64(PID_POINTER_TABLE, __pa(kvm_vmx->pid_table));
> +		vmcs_write16(LAST_PID_POINTER_INDEX, kvm_vmx->kvm.arch.max_vcpu_ids - 1);
> +	}
> +
>  	if (!kvm_pause_in_guest(vmx->vcpu.kvm)) {
>  		vmcs_write32(PLE_GAP, ple_gap);
>  		vmx->ple_window = ple_window;
> @@ -7112,6 +7162,10 @@ static int vmx_vcpu_create(struct kvm_vcpu *vcpu)
>  			goto free_vmcs;
>  	}
>  
> +	if (vmx_can_use_ipiv(vcpu))
> +		WRITE_ONCE(to_kvm_vmx(vcpu->kvm)->pid_table[vcpu->vcpu_id],
> +			   __pa(&vmx->pi_desc) | PID_TABLE_ENTRY_VALID);
> +
>  	return 0;
>  
>  free_vmcs:
> @@ -7746,6 +7800,14 @@ static bool vmx_check_apicv_inhibit_reasons(enum kvm_apicv_inhibit reason)
>  	return supported & BIT(reason);
>  }
>  
> +static void vmx_vm_destroy(struct kvm *kvm)
> +{
> +	struct kvm_vmx *kvm_vmx = to_kvm_vmx(kvm);
> +
> +	if (kvm_vmx->pid_table)
> +		free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm_vmx));

free_pages() does the != 0 check, no need to handle that here.  I agree it feels
weird, but it's well established behavior.

> +}
> +
>  static struct kvm_x86_ops vmx_x86_ops __initdata = {
>  	.name = "kvm_intel",
>  



[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]

  Powered by Linux