Currently, x86 vmalloc()s a dirty bitmap every time we switch to the
next dirty bitmap. To avoid this, we use the double buffering
technique: we also move the bitmaps to userspace, so that the extra
bitmaps do not consume precious kernel resources. This idea is based
on Avi's suggestion.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx>
Signed-off-by: Fernando Luis Vazquez Cao <fernando@xxxxxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h |    3 +++
 include/linux/kvm_host.h        |    6 ++++++
 2 files changed, 9 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 0c49c88..b502bca 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -25,6 +25,9 @@
 #include <asm/mtrr.h>
 #include <asm/msr-index.h>
 
+/* Select x86 specific features in <linux/kvm_host.h> */
+#define __KVM_HAVE_USER_DIRTYBITMAP
+
 #define KVM_MAX_VCPUS 64
 #define KVM_MEMORY_SLOTS 32
 /* memory slots that does not exposed to userspace */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index dd6bcf4..07092d6 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -110,7 +110,13 @@ struct kvm_memory_slot {
 	unsigned long npages;
 	unsigned long flags;
 	unsigned long *rmap;
+#ifndef __KVM_HAVE_USER_DIRTYBITMAP
 	unsigned long *dirty_bitmap;
+#else
+	unsigned long __user *dirty_bitmap;
+	unsigned long __user *dirty_bitmap_old;
+	bool is_dirty;
+#endif
 	struct {
 		unsigned long rmap_pde;
 		int write_count;
--
1.6.3.3
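
For illustration only (not part of this patch): with the double buffering
scheme described above, moving to the next dirty bitmap can become a simple
pointer swap instead of a fresh vmalloc(). The helper name below is
hypothetical and locking is omitted.

	/*
	 * Hypothetical sketch: swap the active and old userspace dirty
	 * bitmaps, so the old one can be harvested by userspace while
	 * the other collects new dirty bits.  No allocation is needed.
	 */
	static void kvm_switch_dirty_bitmap(struct kvm_memory_slot *slot)
	{
		unsigned long __user *tmp = slot->dirty_bitmap;

		slot->dirty_bitmap = slot->dirty_bitmap_old;
		slot->dirty_bitmap_old = tmp;
		slot->is_dirty = false;
	}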