On Sun, Jun 13, 2010 at 03:24:06PM +0300, Nadav Har'El wrote:
> This patch allows a guest to use the VMXON and VMXOFF instructions, and
> emulates them accordingly. Basically this amounts to checking some
> prerequisites, and then remembering whether the guest has enabled or
> disabled VMX operation.
>
> Signed-off-by: Nadav Har'El <nyh@xxxxxxxxxx>
> ---
> --- .before/arch/x86/kvm/vmx.c	2010-06-13 15:01:28.000000000 +0300
> +++ .after/arch/x86/kvm/vmx.c	2010-06-13 15:01:28.000000000 +0300
> @@ -117,6 +117,16 @@ struct shared_msr_entry {
>  	u64 mask;
>  };
>
> +/* The nested_vmx structure is part of vcpu_vmx, and holds information we need
> + * for correct emulation of VMX (i.e., nested VMX) on this vcpu. For example,
> + * the current VMCS set by L1, a list of the VMCSs used to run the active
> + * L2 guests on the hardware, and more.
> + */
> +struct nested_vmx {
> +	/* Has the level-1 guest done vmxon? */
> +	bool vmxon;
> +};
> +
>  struct vcpu_vmx {
>  	struct kvm_vcpu vcpu;
>  	struct list_head local_vcpus_link;
> @@ -168,6 +178,9 @@ struct vcpu_vmx {
>  	u32 exit_reason;
>
>  	bool rdtscp_enabled;
> +
> +	/* Support for guest hypervisors (nested VMX) */
> +	struct nested_vmx nested;
>  };
>
>  static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
> @@ -3353,6 +3366,93 @@ static int handle_vmx_insn(struct kvm_vc
>  	return 1;
>  }
>
> +/* Emulate the VMXON instruction.
> + * Currently, we just remember that VMX is active, and do not save or even
> + * inspect the argument to VMXON (the so-called "VMXON pointer") because we
> + * do not currently need to store anything in that guest-allocated memory
> + * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
> + * argument is different from the VMXON pointer (which the spec says they do).
> + */
> +static int handle_vmon(struct kvm_vcpu *vcpu)
> +{
> +	struct kvm_segment cs;
> +	struct vcpu_vmx *vmx = to_vmx(vcpu);
> +
> +	/* The Intel VMX Instruction Reference lists a bunch of bits that
> +	 * are prerequisite to running VMXON, most notably CR4.VMXE must be
> +	 * set to 1. Otherwise, we should fail with #UD. We test these now:
> +	 */
> +	if (!nested) {
> +		kvm_queue_exception(vcpu, UD_VECTOR);
> +		return 1;
> +	}
> +
> +	if (!(vcpu->arch.cr4 & X86_CR4_VMXE) ||
> +	    !(vcpu->arch.cr0 & X86_CR0_PE) ||

Please use kvm_read_cr0_bits()/kvm_read_cr4_bits() here instead of reading
vcpu->arch.cr0 and vcpu->arch.cr4 directly.
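
For illustration only, a minimal (untested) sketch of those two tests rewritten
with the helpers; it assumes nothing beyond the X86_CR0_PE/X86_CR4_VMXE masks
and the #UD path already used in the patch:

	/* Sketch: read CR0/CR4 through the register-cache helpers rather
	 * than touching vcpu->arch.cr0/cr4 directly.
	 */
	if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
	    !kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}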