The patch titled

     KVM: cache guest cr4 in vcpu structure

has been added to the -mm tree.  Its filename is

     kvm-cache-guest-cr4-in-vcpu-structure.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt
to find out what to do about this.

------------------------------------------------------
Subject: KVM: cache guest cr4 in vcpu structure
From: Avi Kivity <avi@xxxxxxxxxxxx>

This eliminates the need for an arch operation to get cr4.
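
As an illustration of the caching idea, here is a minimal, self-contained
sketch (an editorial addition, not part of the patch): instead of
reassembling cr4 from the VMX read shadow and GUEST_CR4 on every query, the
value the guest writes is stored in the vcpu structure, and the PAE test
simply reads that cached field.  The stand-in names below (kvm_vcpu_sketch,
set_cr4_sketch, is_pae_sketch) are hypothetical simplifications; only the
CR4_PAE_MASK bit position reflects the real x86 definition.

#include <stdio.h>

#define CR4_PAE_MASK (1UL << 5)         /* CR4.PAE, bit 5 */

struct kvm_vcpu_sketch {
        unsigned long cr4;              /* cached guest cr4 */
};

/* Update the cache whenever the guest's cr4 is changed. */
static void set_cr4_sketch(struct kvm_vcpu_sketch *vcpu, unsigned long cr4)
{
        /* hardware-specific state (e.g. the VMCS) would be written here */
        vcpu->cr4 = cr4;
}

/* Generic code can now test PAE without an arch callback. */
static int is_pae_sketch(struct kvm_vcpu_sketch *vcpu)
{
        return (vcpu->cr4 & CR4_PAE_MASK) != 0;
}

int main(void)
{
        struct kvm_vcpu_sketch vcpu = { 0 };

        set_cr4_sketch(&vcpu, CR4_PAE_MASK);
        printf("PAE enabled: %d\n", is_pae_sketch(&vcpu));
        return 0;
}

The patch below applies the same shape to the real code: __set_cr4() stores
cr4 in struct kvm_vcpu, and is_pae()/is_pse() read vcpu->cr4 instead of
calling back into VMX-specific accessors.
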
Signed-off-by: Avi Kivity <avi@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 drivers/kvm/kvm.h         |   15 +++++----------
 drivers/kvm/kvm_main.c    |   17 +++++++++--------
 drivers/kvm/kvm_vmx.h     |   20 ++++++++++++++++++++
 drivers/kvm/mmu.c         |    4 ++--
 drivers/kvm/paging_tmpl.h |    6 +++---
 5 files changed, 39 insertions(+), 23 deletions(-)

diff -puN drivers/kvm/kvm.h~kvm-cache-guest-cr4-in-vcpu-structure drivers/kvm/kvm.h
--- a/drivers/kvm/kvm.h~kvm-cache-guest-cr4-in-vcpu-structure
+++ a/drivers/kvm/kvm.h
@@ -168,6 +168,7 @@ struct kvm_vcpu {
 
 	unsigned long cr2;
 	unsigned long cr3;
+	unsigned long cr4;
 	unsigned long cr8;
 	u64 shadow_efer;
 	u64 apic_base;
@@ -335,20 +336,14 @@ static inline int is_long_mode(void)
 	return vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_CONTROLS_IA32E_MASK;
 }
 
-static inline unsigned long guest_cr4(void)
+static inline int is_pae(struct kvm_vcpu *vcpu)
 {
-	return (vmcs_readl(CR4_READ_SHADOW) & KVM_GUEST_CR4_MASK) |
-		(vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK);
+	return vcpu->cr4 & CR4_PAE_MASK;
 }
 
-static inline int is_pae(void)
+static inline int is_pse(struct kvm_vcpu *vcpu)
 {
-	return guest_cr4() & CR4_PAE_MASK;
-}
-
-static inline int is_pse(void)
-{
-	return guest_cr4() & CR4_PSE_MASK;
+	return vcpu->cr4 & CR4_PSE_MASK;
 }
 
 static inline unsigned long guest_cr0(void)
diff -puN drivers/kvm/kvm_main.c~kvm-cache-guest-cr4-in-vcpu-structure drivers/kvm/kvm_main.c
--- a/drivers/kvm/kvm_main.c~kvm-cache-guest-cr4-in-vcpu-structure
+++ a/drivers/kvm/kvm_main.c
@@ -940,7 +940,7 @@ static void set_cr0(struct kvm_vcp
 #ifdef __x86_64__
 	if ((vcpu->shadow_efer & EFER_LME)) {
 		u32 guest_cs_ar;
-		if (!is_pae()) {
+		if (!is_pae(vcpu)) {
 			printk(KERN_DEBUG "set_cr0: #GP, start paging "
 			       "in long mode while PAE is disabled\n");
 			inject_gp(vcpu);
@@ -956,7 +956,7 @@ static void set_cr0(struct kvm_vcp
 		}
 	} else
 #endif
-	if (is_pae() &&
+	if (is_pae(vcpu) &&
 	    pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
 		       "reserved bits\n");
@@ -993,6 +993,7 @@ static void __set_cr4(struct kvm_vcpu *v
 	vmcs_writel(CR4_READ_SHADOW, cr4);
 	vmcs_writel(GUEST_CR4, cr4 | (vcpu->rmode.active ?
 		    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON));
+	vcpu->cr4 = cr4;
 }
 
 static void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1010,7 +1011,7 @@ static void set_cr4(struct kvm_vcp
 			inject_gp(vcpu);
 			return;
 		}
-	} else if (is_paging() && !is_pae() && (cr4 & CR4_PAE_MASK)
+	} else if (is_paging() && !is_pae(vcpu) && (cr4 & CR4_PAE_MASK)
 		   && pdptrs_have_reserved_bits_set(vcpu, vcpu->cr3)) {
 		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 		inject_gp(vcpu);
@@ -1041,7 +1042,7 @@ static void set_cr3(struct kvm_vcp
 			inject_gp(vcpu);
 			return;
 		}
-		if (is_paging() && is_pae() &&
+		if (is_paging() && is_pae(vcpu) &&
 		    pdptrs_have_reserved_bits_set(vcpu, cr3)) {
 			printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
 			       "reserved bits\n");
@@ -1902,7 +1903,7 @@ unsigned long realmode_get_cr(struct kvm
 	case 3:
 		return vcpu->cr3;
 	case 4:
-		return guest_cr4();
+		return vcpu->cr4;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
 		return 0;
@@ -1924,7 +1925,7 @@ void realmode_set_cr(struct kvm_vcpu *vc
 		set_cr3(vcpu, val);
 		break;
 	case 4:
-		set_cr4(vcpu, mk_cr_64(guest_cr4(), val));
+		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
 		break;
 	default:
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
@@ -2844,7 +2845,7 @@ static int kvm_dev_ioctl_get_sregs(struc
 	sregs->cr0 = guest_cr0();
 	sregs->cr2 = vcpu->cr2;
 	sregs->cr3 = vcpu->cr3;
-	sregs->cr4 = guest_cr4();
+	sregs->cr4 = vcpu->cr4;
 	sregs->cr8 = vcpu->cr8;
 	sregs->efer = vcpu->shadow_efer;
 	sregs->apic_base = vcpu->apic_base;
@@ -2912,7 +2913,7 @@ static int kvm_dev_ioctl_set_sregs(struc
 	vmcs_writel(GUEST_CR0,
 		    (sregs->cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON);
 
-	mmu_reset_needed |= guest_cr4() != sregs->cr4;
+	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
 	__set_cr4(vcpu, sregs->cr4);
 
 	if (mmu_reset_needed)
diff -puN /dev/null drivers/kvm/kvm_vmx.h
--- /dev/null
+++ a/drivers/kvm/kvm_vmx.h
@@ -0,0 +1,20 @@
+#ifndef __KVM_VMX_H
+#define __KVM_VMX_H
+
+static inline void vmcs_write16(unsigned long field, u16 value)
+{
+	vmcs_writel(field, value);
+}
+
+static inline void vmcs_write64(unsigned long field, u64 value)
+{
+#ifdef __x86_64__
+	vmcs_writel(field, value);
+#else
+	vmcs_writel(field, value);
+	asm volatile ("");
+	vmcs_writel(field+1, value >> 32);
+#endif
+}
+
+#endif
diff -puN drivers/kvm/mmu.c~kvm-cache-guest-cr4-in-vcpu-structure drivers/kvm/mmu.c
--- a/drivers/kvm/mmu.c~kvm-cache-guest-cr4-in-vcpu-structure
+++ a/drivers/kvm/mmu.c
@@ -564,7 +564,7 @@ static int paging64_init_context(struct
 {
 	struct kvm_mmu *context = &vcpu->mmu;
 
-	ASSERT(is_pae());
+	ASSERT(is_pae(vcpu));
 	context->new_cr3 = paging_new_cr3;
 	context->page_fault = paging64_page_fault;
 	context->inval_page = paging_inval_page;
@@ -618,7 +618,7 @@ static int init_kvm_mmu(struct kvm_vcpu
 		return nonpaging_init_context(vcpu);
 	else if (is_long_mode())
 		return paging64_init_context(vcpu);
-	else if (is_pae())
+	else if (is_pae(vcpu))
 		return paging32E_init_context(vcpu);
 	else
 		return paging32_init_context(vcpu);
diff -puN drivers/kvm/paging_tmpl.h~kvm-cache-guest-cr4-in-vcpu-structure drivers/kvm/paging_tmpl.h
--- a/drivers/kvm/paging_tmpl.h~kvm-cache-guest-cr4-in-vcpu-structure
+++ a/drivers/kvm/paging_tmpl.h
@@ -70,7 +70,7 @@ static void FNAME(init_walker)(struct gu
 	hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
 	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);
 
-	ASSERT((!is_long_mode() && is_pae()) ||
+	ASSERT((!is_long_mode() && is_pae(vcpu)) ||
 	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
 
 	walker->table = (pt_element_t *)( (unsigned long)walker->table |
@@ -133,7 +133,7 @@ static pt_element_t *FNAME(fetch_guest)(
 		    !is_present_pte(walker->table[index]) ||
 		    (walker->level == PT_DIRECTORY_LEVEL &&
 		     (walker->table[index] & PT_PAGE_SIZE_MASK) &&
-		     (PTTYPE == 64 || is_pse())))
+		     (PTTYPE == 64 || is_pse(vcpu))))
 			return &walker->table[index];
 		if (walker->level != 3 || is_long_mode())
 			walker->inherited_ar &= walker->table[index];
@@ -369,7 +369,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kv
 
 	if (walker.level == PT_DIRECTORY_LEVEL) {
 		ASSERT((guest_pte & PT_PAGE_SIZE_MASK));
-		ASSERT(PTTYPE == 64 || is_pse());
+		ASSERT(PTTYPE == 64 || is_pse(vcpu));
 
 		gpa = (guest_pte & PT_DIR_BASE_ADDR_MASK) |
 			(vaddr & (PT_LEVEL_MASK(PT_PAGE_TABLE_LEVEL) | ~PAGE_MASK));
_

Patches currently in -mm which might be from avi@xxxxxxxxxxxx are

kvm-userspace-interface.patch
kvm-userspace-interface-make-enum-values-in-userspace-interface-explicit.patch
kvm-intel-virtual-mode-extensions-definitions.patch
kvm-kvm-data-structures.patch
kvm-random-accessors-and-constants.patch
kvm-virtualization-infrastructure.patch
kvm-virtualization-infrastructure-kvm-fix-guest-cr4-corruption.patch
kvm-virtualization-infrastructure-include-desch.patch
kvm-virtualization-infrastructure-fix-segment-state-changes-across-processor-mode-switches.patch
kvm-virtualization-infrastructure-fix-asm-constraints-for-segment-loads.patch
kvm-virtualization-infrastructure-fix-mmu-reset-locking-when-setting-cr0.patch
kvm-memory-slot-management.patch
kvm-vcpu-creation-and-maintenance.patch
kvm-vcpu-creation-and-maintenance-segment-access-cleanup.patch
kvm-workaround-cr0cd-cache-disable-bit-leak-from-guest-to.patch
kvm-vcpu-execution-loop.patch
kvm-define-exit-handlers.patch
kvm-define-exit-handlers-pass-fs-gs-segment-bases-to-x86-emulator.patch
kvm-less-common-exit-handlers.patch
kvm-less-common-exit-handlers-handle-rdmsrmsr_efer.patch
kvm-mmu.patch
kvm-x86-emulator.patch
kvm-clarify-licensing.patch
kvm-x86-emulator-fix-emulator-mov-cr-decoding.patch
kvm-plumbing.patch
kvm-dynamically-determine-which-msrs-to-load-and-save.patch
kvm-fix-calculation-of-initial-value-of-rdx-register.patch
kvm-avoid-using-vmx-instruction-directly.patch
kvm-avoid-using-vmx-instruction-directly-fix-asm-constraints.patch
kvm-expose-interrupt-bitmap.patch
kvm-add-time-stamp-counter-msr-and-accessors.patch
kvm-expose-msrs-to-userspace.patch
kvm-expose-msrs-to-userspace-v2.patch
kvm-create-kvm-intelko-module.patch
kvm-make-dev-registration-happen-when-the-arch.patch
kvm-make-hardware-detection-an-arch-operation.patch
kvm-make-the-per-cpu-enable-disable-functions-arch.patch
kvm-make-the-hardware-setup-operations-non-percpu.patch
kvm-make-the-guest-debugger-an-arch-operation.patch
kvm-make-msr-accessors-arch-operations.patch
kvm-make-the-segment-accessors-arch-operations.patch
kvm-cache-guest-cr4-in-vcpu-structure.patch
kvm-cache-guest-cr0-in-vcpu-structure.patch
kvm-add-get_segment_base-arch-accessor.patch
kvm-add-idt-and-gdt-descriptor-accessors.patch
kvm-make-syncing-the-register-file-to-the-vcpu.patch
kvm-make-the-vcpu-execution-loop-an-arch-operation.patch
kvm-move-the-vmx-exit-handlers-to-vmxc.patch
kvm-make-vcpu_setup-an-arch-operation.patch
kvm-make-__set_cr0-and-dependencies-arch-operations.patch
kvm-make-__set_cr4-an-arch-operation.patch
kvm-make-__set_efer-an-arch-operation.patch
kvm-make-set_cr3-and-tlb-flushing-arch-operations.patch
kvm-make-inject_page_fault-an-arch-operation.patch
kvm-make-inject_gp-an-arch-operation.patch
kvm-use-the-idt-and-gdt-accessors-in-realmode-emulation.patch
kvm-use-the-general-purpose-register-accessors-rather.patch
kvm-move-the-vmx-tsc-accessors-to-vmxc.patch
kvm-access-rflags-through-an-arch-operation.patch
kvm-move-the-vmx-segment-field-definitions-to-vmxc.patch
kvm-add-an-arch-accessor-for-cs-d-b-and-l-bits.patch
kvm-add-a-set_cr0_no_modeswitch-arch-accessor.patch
kvm-make-vcpu_load-and-vcpu_put-arch-operations.patch
kvm-make-vcpu-creation-and-destruction-arch-operations.patch
kvm-move-vmcs-static-variables-to-vmxc.patch
kvm-make-is_long_mode-an-arch-operation.patch
kvm-use-the-tlb-flush-arch-operation-instead-of-an.patch
kvm-remove-guest_cpl.patch
kvm-move-vmcs-accessors-to-vmxc.patch
kvm-move-vmx-helper-inlines-to-vmxc.patch
kvm-remove-vmx-includes-from-arch-independent-code.patch