Avi Kivity <avi@xxxxxxxxxx> wrote on 02/09/2009 22:34:58:

> From:
> Avi Kivity <avi@xxxxxxxxxx>
>
> To:
> Orit Wasserman/Haifa/IBM@IBMIL
>
> Cc:
> kvm@xxxxxxxxxxxxxxx, Ben-Ami Yassour1/Haifa/IBM@IBMIL,
> Muli Ben-Yehuda/Haifa/IBM@IBMIL, Abel Gordon/Haifa/IBM@IBMIL,
> aliguori@xxxxxxxxxx, mmday@xxxxxxxxxx
>
> Date:
> 02/09/2009 22:34
>
> Subject:
> Re: [PATCH 1/6] Nested VMX patch 1 implements vmon and vmoff
>
> On 09/02/2009 06:38 PM, oritw@xxxxxxxxxx wrote:
> > From: Orit Wasserman <oritw@xxxxxxxxxx>
> >
> > ---
> >  arch/x86/kvm/svm.c |    3 -
> >  arch/x86/kvm/vmx.c |  187 +++++++++++++++++++++++++++++++++++++++++++++++-
> >  arch/x86/kvm/x86.c |    6 ++-
> >  arch/x86/kvm/x86.h |    2 +
> >  4 files changed, 192 insertions(+), 6 deletions(-)
> >
> > diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
> > index 2df9b45..3c1f22a 100644
> > --- a/arch/x86/kvm/svm.c
> > +++ b/arch/x86/kvm/svm.c
> > @@ -124,9 +124,6 @@ static int npt = 1;
> >
> >  module_param(npt, int, S_IRUGO);
> >
> > -static int nested = 1;
> > -module_param(nested, int, S_IRUGO);
> > -
> >  static void svm_flush_tlb(struct kvm_vcpu *vcpu);
> >  static void svm_complete_interrupts(struct vcpu_svm *svm);
> >
> > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
> > index 78101dd..abba325 100644
> > --- a/arch/x86/kvm/vmx.c
> > +++ b/arch/x86/kvm/vmx.c
> > @@ -67,6 +67,11 @@ struct vmcs {
> >      char data[0];
> >  };
> >
> > +struct nested_vmx {
> > +    /* Has the level1 guest done vmon? */
> > +    bool vmon;
> > +};
>
> vmxon

fixed

> > @@ -967,6 +975,69 @@ static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
> >  }
> >
> >  /*
> > + * Handles msr read for nested virtualization
> > + */
> > +static int nested_vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index,
> > +                              u64 *pdata)
> > +{
> > +    u32 vmx_msr_low = 0, vmx_msr_high = 0;
> > +
> > +    switch (msr_index) {
> > +    case MSR_IA32_FEATURE_CONTROL:
> > +        *pdata = 0;
> > +        break;
> > +    case MSR_IA32_VMX_BASIC:
> > +        rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
>
> Use rdmsrl, it's easier.

fixed

> I think we need to mask it with the capabilities we support. Otherwise
> the guest can try to use some new feature which we don't support yet,
> and crash.

I agree, but I went over the Intel spec and didn't find any problematic
feature. We may need to consider it in the future.

> > +        *pdata = vmx_msr_low | ((u64)vmx_msr_high << 32);
> > +        break;
> > +    case MSR_IA32_VMX_PINBASED_CTLS:
> > +        *pdata = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
> > +            PIN_BASED_VIRTUAL_NMIS;
>
> Need to mask with actual cpu capabilities in case we run on an older cpu.

fixed

> > +        break;
> > +    case MSR_IA32_VMX_PROCBASED_CTLS:
> > +        *pdata = CPU_BASED_HLT_EXITING |
> > +#ifdef CONFIG_X86_64
> > +            CPU_BASED_CR8_LOAD_EXITING |
> > +            CPU_BASED_CR8_STORE_EXITING |
> > +#endif
> > +            CPU_BASED_CR3_LOAD_EXITING |
> > +            CPU_BASED_CR3_STORE_EXITING |
> > +            CPU_BASED_USE_IO_BITMAPS |
> > +            CPU_BASED_MOV_DR_EXITING |
> > +            CPU_BASED_USE_TSC_OFFSETING |
> > +            CPU_BASED_INVLPG_EXITING;
>
> Same here... or are all these guaranteed to be present?

fixed
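[Editor's note: the masking asked for above can follow the same pattern vmx.c
already applies to its own VMCS controls in adjust_vmx_controls(): read the
control MSR, drop bits outside the high ("allowed-1") dword, and force on the
low ("must-be-one") dword. A minimal sketch only; the helper name
nested_vmx_pinbased_ctls() is hypothetical and not part of the patch.]

/*
 * Illustration (not from the patch): mask the pin-based controls that
 * nested_vmx_get_msr() reports with what the physical CPU supports.
 * MSR_IA32_VMX_PINBASED_CTLS reports must-be-one settings in its low
 * dword and allowed-one settings in its high dword, so the usable set
 * is (wanted & high) | low -- the same pattern adjust_vmx_controls()
 * uses when setting up the real VMCS.
 */
static u32 nested_vmx_pinbased_ctls(void)
{
        u32 must_be_one, allowed_one;
        u32 wanted = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
                     PIN_BASED_VIRTUAL_NMIS;

        rdmsr(MSR_IA32_VMX_PINBASED_CTLS, must_be_one, allowed_one);

        return (wanted & allowed_one) | must_be_one;
}

[The processor-based controls (MSR_IA32_VMX_PROCBASED_CTLS) could be filtered
the same way before being exposed to the L1 guest.]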
> > +
> > +static int handle_vmon(struct kvm_vcpu *vcpu)
> > +{
> > +    struct kvm_segment cs;
> > +    struct vcpu_vmx *vmx = to_vmx(vcpu);
> > +
> > +    if (!nested) {
> > +        printk(KERN_DEBUG "%s: nested vmx not enabled\n", __func__);
> > +        kvm_queue_exception(vcpu, UD_VECTOR);
> > +        return 1;
> > +    }
> > +
> > +    vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
> > +
> > +    if (!(vcpu->arch.cr4 & X86_CR4_VMXE) ||
> > +        !(vcpu->arch.cr0 & X86_CR0_PE) ||
> > +        (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
> > +        ((find_msr_entry(to_vmx(vcpu),
> > +                         MSR_EFER)->data & EFER_LMA) && !cs.l)) {
>
> Not everyone has EFER. Better to wrap this in an #ifdef CONFIG_X86_64.

fixed [see the sketch after the signature below]

> --
> I have a truly marvellous patch that fixes the bug which this
> signature is too narrow to contain.
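[Editor's note: regarding "Not everyone has EFER" above, here is a minimal
sketch of how the mode check in handle_vmon() might be wrapped so the
EFER.LMA test only compiles on 64-bit builds. The helper name is
hypothetical; this illustrates the suggestion, not the actual follow-up
patch.]

/*
 * Hypothetical helper: the legality checks handle_vmon() performs,
 * with the EFER.LMA/CS.L test compiled in only for CONFIG_X86_64,
 * since 32-bit hosts may not have an EFER entry to consult.
 */
static bool nested_vmxon_allowed(struct kvm_vcpu *vcpu)
{
        struct kvm_segment cs;

        vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);

        if (!(vcpu->arch.cr4 & X86_CR4_VMXE) ||
            !(vcpu->arch.cr0 & X86_CR0_PE) ||
            (vmx_get_rflags(vcpu) & X86_EFLAGS_VM))
                return false;

#ifdef CONFIG_X86_64
        /* In long mode, VMXON must be executed from 64-bit code (CS.L set). */
        if ((find_msr_entry(to_vmx(vcpu), MSR_EFER)->data & EFER_LMA) &&
            !cs.l)
                return false;
#endif

        return true;
}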