Re: [PATCH v2] KVM: VMX: Execute WBINVD to keep data consistency with assigned devices

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Sheng Yang wrote:
> Some guest device drivers may leverage "Non-Snoop" I/O and explicitly
> execute WBINVD or CLFLUSH on a RAM region. Since migration may occur before WBINVD or
> CLFLUSH, we need to maintain data consistency either by:
> 1: flushing cache (wbinvd) when the guest is scheduled out if there is no
> wbinvd exit, or
> 2: execute wbinvd on all dirty physical CPUs when guest wbinvd exits.
> 
> Signed-off-by: Yaozu (Eddie) Dong <eddie.dong@xxxxxxxxx>
> Signed-off-by: Sheng Yang <sheng@xxxxxxxxxxxxxxx>
> ---
> Jan-
> 
I've checked whether we can make it more generic. But the logic here heavily depends on
whether the processor has the WBINVD exit feature, and the common part with SVM is no more
than 10 lines, all in the branch of an if statement.

AFAIK, all AMD processors with SVM support have wbinvd trapping. So you
can simply move the VMX part which deals with cpu_has_vmx_wbinvd_exit
into generic services to call them from SVM as well.

Or is wbinvd emulation for device pass-through an Intel-only issue? Joerg?

> So I think it's fine to keep
> them there. Maybe wbinvd_ipi() can be moved, but it's somewhat strange for KVM
> scope. Any suggestion to make this wrapper function cleaner? I hope we have
> a macro that can do that...
> 
>  arch/x86/include/asm/kvm_host.h |    3 ++
>  arch/x86/kvm/emulate.c          |    5 +++-
>  arch/x86/kvm/svm.c              |    6 +++++
>  arch/x86/kvm/vmx.c              |   45 ++++++++++++++++++++++++++++++++++++++-
>  arch/x86/kvm/x86.c              |    6 +++++
>  5 files changed, 63 insertions(+), 2 deletions(-)
> 
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index a57cdea..1c392c9 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -514,6 +514,8 @@ struct kvm_x86_ops {
>  
>  	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
>  
> +	void (*execute_wbinvd)(struct kvm_vcpu *vcpu);
> +
>  	const struct trace_print_flags *exit_reasons_str;
>  };
>  
> @@ -571,6 +573,7 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
>  int kvm_emulate_halt(struct kvm_vcpu *vcpu);
>  int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
>  int emulate_clts(struct kvm_vcpu *vcpu);
> +int emulate_wbinvd(struct kvm_vcpu *vcpu);
>  
>  void kvm_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
>  int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg);
> diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
> index abb8cec..085dcb7 100644
> --- a/arch/x86/kvm/emulate.c
> +++ b/arch/x86/kvm/emulate.c
> @@ -3138,8 +3138,11 @@ twobyte_insn:
>  		emulate_clts(ctxt->vcpu);
>  		c->dst.type = OP_NONE;
>  		break;
> -	case 0x08:		/* invd */
>  	case 0x09:		/* wbinvd */
> +		emulate_wbinvd(ctxt->vcpu);
> +		c->dst.type = OP_NONE;
> +		break;
> +	case 0x08:		/* invd */
>  	case 0x0d:		/* GrpP (prefetch) */
>  	case 0x18:		/* Grp16 (prefetch/nop) */
>  		c->dst.type = OP_NONE;
> diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> index 587b99d..6929da1 100644
> --- a/arch/x86/kvm/svm.c
> +++ b/arch/x86/kvm/svm.c
> @@ -3424,6 +3424,10 @@ static bool svm_rdtscp_supported(void)
>  	return false;
>  }
>  
> +static void svm_execute_wbinvd(struct kvm_vcpu *vcpu)
> +{
> +}
> +
>  static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
>  {
>  	struct vcpu_svm *svm = to_svm(vcpu);
> @@ -3508,6 +3512,8 @@ static struct kvm_x86_ops svm_x86_ops = {
>  	.rdtscp_supported = svm_rdtscp_supported,
>  
>  	.set_supported_cpuid = svm_set_supported_cpuid,
> +
> +	.execute_wbinvd = svm_execute_wbinvd,
>  };
>  
>  static int __init svm_init(void)
> diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> index e565689..fd6c7e6 100644
> --- a/arch/x86/kvm/vmx.c
> +++ b/arch/x86/kvm/vmx.c
> @@ -29,6 +29,7 @@
>  #include <linux/ftrace_event.h>
>  #include <linux/slab.h>
>  #include <linux/tboot.h>
> +#include <linux/cpumask.h>
>  #include "kvm_cache_regs.h"
>  #include "x86.h"
>  
> @@ -170,6 +171,8 @@ struct vcpu_vmx {
>  	u32 exit_reason;
>  
>  	bool rdtscp_enabled;
> +
> +	cpumask_t wbinvd_dirty_mask;
>  };
>  
>  static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
> @@ -412,6 +415,12 @@ static inline bool cpu_has_virtual_nmis(void)
>  	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
>  }
>  
> +static inline bool cpu_has_vmx_wbinvd_exit(void)
> +{
> +	return vmcs_config.cpu_based_2nd_exec_ctrl &
> +		SECONDARY_EXEC_WBINVD_EXITING;
> +}
> +
>  static inline bool report_flexpriority(void)
>  {
>  	return flexpriority_enabled;
> @@ -874,6 +883,11 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
>  	preempt_enable();
>  }
>  
> +static void wbinvd_ipi(void *opaque)
> +{
> +	wbinvd();
> +}
> +
>  /*
>   * Switches to specified vcpu, until a matching vcpu_put(), but assumes
>   * vcpu mutex is already taken.
> @@ -905,6 +919,15 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
>  			 &per_cpu(vcpus_on_cpu, cpu));
>  		local_irq_enable();
>  
> +		/* Address WBINVD may be executed by guest */
> +		if (vcpu->kvm->arch.iommu_domain) {
> +			if (cpu_has_vmx_wbinvd_exit())
> +				cpu_set(cpu, vmx->wbinvd_dirty_mask);
> +			else if (vcpu->cpu != -1)
> +				smp_call_function_single(vcpu->cpu,
> +					wbinvd_ipi, NULL, 1);
> +		}
> +
>  		vcpu->cpu = cpu;
>  		/*
>  		 * Linux uses per-cpu TSS and GDT, so set these when switching
> @@ -3397,10 +3420,26 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
>  	return 1;
>  }
>  
> +static void vmx_execute_wbinvd(struct kvm_vcpu *vcpu)
> +{
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +	if (!vcpu->kvm->arch.iommu_domain)
> +		return;
> +
> +	if (cpu_has_vmx_wbinvd_exit()) {
> +		smp_call_function_many(&vmx->wbinvd_dirty_mask,
> +				wbinvd_ipi, NULL, 1);
> +		cpus_clear(vmx->wbinvd_dirty_mask);

That's even simpler than I was expecting. Looks good.

> +	} else
> +		wbinvd();
> +
> +}
> +
>  static int handle_wbinvd(struct kvm_vcpu *vcpu)
>  {
>  	skip_emulated_instruction(vcpu);
> -	/* TODO: Add support for VT-d/pass-through device */
> +	vmx_execute_wbinvd(vcpu);
>  	return 1;
>  }
>  
> @@ -4132,6 +4171,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
>  			goto free_vmcs;
>  	}
>  
> +	cpus_clear(vmx->wbinvd_dirty_mask);
> +
>  	return &vmx->vcpu;
>  
>  free_vmcs:
> @@ -4350,6 +4391,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
>  	.rdtscp_supported = vmx_rdtscp_supported,
>  
>  	.set_supported_cpuid = vmx_set_supported_cpuid,
> +
> +	.execute_wbinvd = vmx_execute_wbinvd,
>  };
>  
>  static int __init vmx_init(void)
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index d0b9252..eba3a2b 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -3650,6 +3650,12 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
>  	return X86EMUL_CONTINUE;
>  }
>  
> +int emulate_wbinvd(struct kvm_vcpu *vcpu)
> +{
> +	kvm_x86_ops->execute_wbinvd(vcpu);
> +	return X86EMUL_CONTINUE;
> +}
> +
>  int emulate_clts(struct kvm_vcpu *vcpu)
>  {
>  	kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));

Jan

-- 
Siemens AG, Corporate Technology, CT T DE IT 1
Corporate Competence Center Embedded Linux
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux