On Sun, Mar 10, 2013 at 06:05:25PM +0200, Abel Gordon wrote:
> Prepare vmread and vmwrite bitmaps according to a pre-specified list of
> fields. These lists are intended to specify the most frequently accessed
> fields so we can minimize the number of fields that are copied from/to
> the software-controlled VMCS12 format to/from the processor-specific
> shadow VMCS. The lists were built by measuring the VMCS field access
> rate after L2 Ubuntu 12.04 booted while running on top of L1 KVM, also
> Ubuntu 12.04. Note that during boot there were additional fields which
> were frequently modified, but they were not added to these lists because
> after boot they were no longer accessed by L1.
>
> Signed-off-by: Abel Gordon <abelg@xxxxxxxxxx>
> ---
>  arch/x86/kvm/vmx.c | 75 ++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 74 insertions(+), 1 deletion(-)
>
> --- .before/arch/x86/kvm/vmx.c	2013-03-10 18:00:54.000000000 +0200
> +++ .after/arch/x86/kvm/vmx.c	2013-03-10 18:00:54.000000000 +0200
> @@ -453,6 +453,51 @@ static inline struct vcpu_vmx *to_vmx(st
>  #define FIELD64(number, name)	[number] = VMCS12_OFFSET(name), \
>  				[number##_HIGH] = VMCS12_OFFSET(name)+4
>
> +
> +static const unsigned long shadow_read_only_fields[] = {
> +	VM_EXIT_REASON,
> +	VM_EXIT_INTR_INFO,
> +	VM_EXIT_INSTRUCTION_LEN,
> +	IDT_VECTORING_INFO_FIELD,
> +	IDT_VECTORING_ERROR_CODE,
> +	VM_EXIT_INTR_ERROR_CODE,
> +	EXIT_QUALIFICATION,
> +	GUEST_LINEAR_ADDRESS,
> +	GUEST_PHYSICAL_ADDRESS
> +};
> +static const int max_shadow_read_only_fields = ARRAY_SIZE(shadow_read_only_fields);
>
> +static const unsigned long shadow_read_write_fields[] = {
> +	GUEST_RIP,
> +	GUEST_RSP,
> +	GUEST_CR0,
> +	GUEST_CR3,
> +	GUEST_CR4,
> +	GUEST_INTERRUPTIBILITY_INFO,
> +	GUEST_RFLAGS,
> +	GUEST_CS_SELECTOR,
> +	GUEST_CS_AR_BYTES,
> +	GUEST_CS_LIMIT,
> +	GUEST_CS_BASE,
> +	GUEST_ES_BASE,
> +	CR0_GUEST_HOST_MASK,
> +	CR0_READ_SHADOW,
> +	CR4_READ_SHADOW,
> +	TSC_OFFSET,
> +	EXCEPTION_BITMAP,
> +	CPU_BASED_VM_EXEC_CONTROL,
> +	VM_ENTRY_EXCEPTION_ERROR_CODE,
> +	VM_ENTRY_INTR_INFO_FIELD,
> +	VM_ENTRY_INSTRUCTION_LEN,
> +	VM_ENTRY_EXCEPTION_ERROR_CODE,
> +	HOST_FS_BASE,
> +	HOST_GS_BASE,
> +	HOST_FS_SELECTOR,
> +	HOST_GS_SELECTOR
> +};
> +static const int max_shadow_read_write_fields =
> +	ARRAY_SIZE(shadow_read_write_fields);
> +
>  static const unsigned short vmcs_field_to_offset_table[] = {
>  	FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
>  	FIELD(GUEST_ES_SELECTOR, guest_es_selector),
> @@ -642,6 +687,8 @@ static unsigned long *vmx_msr_bitmap_leg
>  static unsigned long *vmx_msr_bitmap_longmode;
>  static unsigned long *vmx_msr_bitmap_legacy_x2apic;
>  static unsigned long *vmx_msr_bitmap_longmode_x2apic;
> +static unsigned long *vmx_vmread_bitmap;
> +static unsigned long *vmx_vmwrite_bitmap;
>
>  static bool cpu_has_load_ia32_efer;
>  static bool cpu_has_load_perf_global_ctrl;
> @@ -4033,6 +4080,8 @@ static int vmx_vcpu_setup(struct vcpu_vm
>  	vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
>  	vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
>
> +	vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
> +	vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));

Why are you doing this without checking that shadow VMCS is supported
and enabled?
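I.e., something along these lines (a rough sketch only; the
enable_shadow_vmcs flag and cpu_has_vmx_shadow_vmcs() helper are assumed
here and their names are illustrative):

	/*
	 * Sketch: only program the vmread/vmwrite bitmaps when the CPU
	 * supports VMCS shadowing and the feature is actually enabled.
	 * enable_shadow_vmcs and cpu_has_vmx_shadow_vmcs() are assumed.
	 */
	if (enable_shadow_vmcs && cpu_has_vmx_shadow_vmcs()) {
		vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
	}

Otherwise we write VMCS fields that do not exist on hardware without
VMCS shadowing, which, if I am not mistaken, makes the vmwrite fail
there.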
>  	if (cpu_has_vmx_msr_bitmap())
>  		vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
>
> @@ -7764,6 +7813,24 @@ static int __init vmx_init(void)
>  			(unsigned long *)__get_free_page(GFP_KERNEL);
>  	if (!vmx_msr_bitmap_longmode_x2apic)
>  		goto out4;
> +	vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
> +	if (!vmx_vmread_bitmap)
> +		goto out4;
> +
> +	vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
> +	if (!vmx_vmwrite_bitmap)
> +		goto out5;
> +

We need to clean up these bitmap allocations some day so they are done
only when the feature is supported and used (rough sketch at the end of
this mail).

> +	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
> +	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
> +	/* shadowed read/write fields */
> +	for (i = 0; i < max_shadow_read_write_fields; i++) {
> +		clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
> +		clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
> +	}
> +	/* shadowed read only fields */
> +	for (i = 0; i < max_shadow_read_only_fields; i++)
> +		clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
>
>  	/*
>  	 * Allow direct access to the PC debug port (it is often used for I/O
> @@ -7782,7 +7849,7 @@ static int __init vmx_init(void)
>  	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
>  		     __alignof__(struct vcpu_vmx), THIS_MODULE);
>  	if (r)
> -		goto out3;
> +		goto out6;
>
>  #ifdef CONFIG_KEXEC
>  	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
> @@ -7830,6 +7897,10 @@ static int __init vmx_init(void)
>
>  	return 0;
>
> +out6:
> +	free_page((unsigned long)vmx_vmwrite_bitmap);
> +out5:
> +	free_page((unsigned long)vmx_vmread_bitmap);
>  out4:
>  	free_page((unsigned long)vmx_msr_bitmap_longmode);
>  out3:
> @@ -7851,6 +7922,8 @@ static void __exit vmx_exit(void)
>  	free_page((unsigned long)vmx_msr_bitmap_longmode);
>  	free_page((unsigned long)vmx_io_bitmap_b);
>  	free_page((unsigned long)vmx_io_bitmap_a);
> +	free_page((unsigned long)vmx_vmwrite_bitmap);
> +	free_page((unsigned long)vmx_vmread_bitmap);
>
>  #ifdef CONFIG_KEXEC
>  	rcu_assign_pointer(crash_vmclear_loaded_vmcss, NULL);
>

--
Gleb.
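P.S. Re the allocation cleanup mentioned above, I mean something along
these lines eventually (a rough sketch only; enable_shadow_vmcs is an
assumed, illustrative flag, and this ignores that the capability is only
known after hardware setup has run):

	/*
	 * Sketch: allocate and initialize the vmread/vmwrite bitmaps only
	 * when shadow VMCS will actually be used.
	 */
	if (enable_shadow_vmcs) {
		vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
		if (!vmx_vmread_bitmap)
			goto out4;

		vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
		if (!vmx_vmwrite_bitmap)
			goto out5;

		memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
		memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
		/* the clear_bit() loops over the shadow field lists would
		 * move in here as well */
	}

free_page() is a no-op for a zero address, so the error and exit paths
should not need extra checks.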