This patch allows the guest to enable the VMXE bit in CR4, which is a prerequisite to running VMXON. Whether to allow setting the VMXE bit now depends on the architecture (svm or vmx), so its checking has moved to kvm_x86_ops->set_cr4(). This function now returns an int: If kvm_x86_ops->set_cr4() returns 1, __kvm_set_cr4() will also return 1, and this will cause kvm_set_cr4() to throw a #GP. Turning on the VMXE bit is allowed only when the "nested" module option is on, and turning it off is forbidden after a vmxon. Signed-off-by: Nadav Har'El <nyh@xxxxxxxxxx> --- arch/x86/include/asm/kvm_host.h | 2 +- arch/x86/kvm/svm.c | 6 +++++- arch/x86/kvm/vmx.c | 13 +++++++++++-- arch/x86/kvm/x86.c | 4 +--- 4 files changed, 18 insertions(+), 7 deletions(-) --- .before/arch/x86/include/asm/kvm_host.h 2010-10-17 11:52:00.000000000 +0200 +++ .after/arch/x86/include/asm/kvm_host.h 2010-10-17 11:52:00.000000000 +0200 @@ -532,7 +532,7 @@ struct kvm_x86_ops { void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu); void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3); - void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); + int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); --- .before/arch/x86/kvm/svm.c 2010-10-17 11:52:00.000000000 +0200 +++ .after/arch/x86/kvm/svm.c 2010-10-17 11:52:00.000000000 +0200 @@ -1271,11 +1271,14 @@ static void svm_set_cr0(struct kvm_vcpu update_cr0_intercept(svm); } -static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE; unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4; + if (cr4 & X86_CR4_VMXE) + return 1; + if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE)) 
force_new_asid(vcpu); @@ -1284,6 +1287,7 @@ static void svm_set_cr4(struct kvm_vcpu cr4 |= X86_CR4_PAE; cr4 |= host_cr4_mce; to_svm(vcpu)->vmcb->save.cr4 = cr4; + return 0; } static void svm_set_segment(struct kvm_vcpu *vcpu, --- .before/arch/x86/kvm/x86.c 2010-10-17 11:52:00.000000000 +0200 +++ .after/arch/x86/kvm/x86.c 2010-10-17 11:52:00.000000000 +0200 @@ -603,11 +603,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, u && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)) return 1; - if (cr4 & X86_CR4_VMXE) + if (kvm_x86_ops->set_cr4(vcpu, cr4)) return 1; - kvm_x86_ops->set_cr4(vcpu, cr4); - if ((cr4 ^ old_cr4) & pdptr_bits) kvm_mmu_reset_context(vcpu); --- .before/arch/x86/kvm/vmx.c 2010-10-17 11:52:00.000000000 +0200 +++ .after/arch/x86/kvm/vmx.c 2010-10-17 11:52:00.000000000 +0200 @@ -1880,7 +1880,7 @@ static void ept_save_pdptrs(struct kvm_v (unsigned long *)&vcpu->arch.regs_dirty); } -static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); +static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); static void ept_update_paging_mode_cr0(unsigned long *hw_cr0, unsigned long cr0, @@ -1975,11 +1975,19 @@ static void vmx_set_cr3(struct kvm_vcpu vmcs_writel(GUEST_CR3, guest_cr3); } -static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) +static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) { unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ? 
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON); + if (cr4 & X86_CR4_VMXE){ + if (!nested) + return 1; + } else { + if (nested && to_vmx(vcpu)->nested.vmxon) + return 1; + } + vcpu->arch.cr4 = cr4; if (enable_ept) { if (!is_paging(vcpu)) { @@ -1992,6 +2000,7 @@ static void vmx_set_cr4(struct kvm_vcpu vmcs_writel(CR4_READ_SHADOW, cr4); vmcs_writel(GUEST_CR4, hw_cr4); + return 0; } static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg) -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html