Currently x86's kvm_vm_ioctl_get_dirty_log() needs to allocate a bitmap by vmalloc() which will be used in the next logging and this has been causing a bad effect on VGA and live migration: vmalloc() consumes extra system time, triggers tlb flush, etc. This patch resolves this issue by pre-allocating one more bitmap and switching between two bitmaps during dirty logging. Performance improvement: I measured performance for the case of VGA update by trace-cmd. - Without this patch | kvm_vm_ioctl_get_dirty_log() { | mutex_lock() { 0.195 us | _cond_resched(); 0.683 us | } 0.207 us | _raw_spin_lock(); | kvm_mmu_slot_remove_write_access() { ... ... 2.916 us | } | vmalloc() { ... ... + 43.731 us | } 0.222 us | memset(); | T.1632() { | __kmalloc() { ... ... 2.870 us | } 3.257 us | } | synchronize_srcu_expedited() { ... ... ! 143.147 us | } 0.480 us | kfree(); | copy_to_user() { 0.196 us | _cond_resched(); 0.635 us | } | vfree() { ... ... + 12.103 us | } + 12.508 us | } 0.218 us | mutex_unlock(); ! 211.323 us | } - With this patch | kvm_vm_ioctl_get_dirty_log() { | mutex_lock() { 0.199 us | _cond_resched(); 0.703 us | } 0.222 us | _raw_spin_lock(); | kvm_mmu_slot_remove_write_access() { ... ... 2.179 us | } 0.225 us | memset(); | T.1634() { | __kmalloc() { ... ... 2.367 us | } 2.791 us | } | synchronize_srcu_expedited() { ... ... ! 125.299 us | } 0.263 us | kfree(); | copy_to_user() { 0.196 us | _cond_resched(); 0.647 us | } 0.214 us | mutex_unlock(); ! 135.223 us | } So the result was 1.5 times faster than the original. In the case of live migration, the improvement ratio depends on the workload and the guest memory size. Note: This does not change other architectures' logic but the allocation size becomes twice as large. This will increase the actual memory consumption only when the new size changes the number of pages allocated by vmalloc(). 
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx> Signed-off-by: Fernando Luis Vazquez Cao <fernando@xxxxxxxxxxxxx> --- arch/x86/kvm/x86.c | 16 +++++----------- include/linux/kvm_host.h | 1 + virt/kvm/kvm_main.c | 11 +++++++++-- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f3f86b2..c4d2e0b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -3171,18 +3171,15 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, kvm_mmu_slot_remove_write_access(kvm, log->slot); spin_unlock(&kvm->mmu_lock); - r = -ENOMEM; - dirty_bitmap = vmalloc(n); - if (!dirty_bitmap) - goto out; + dirty_bitmap = memslot->dirty_bitmap_head; + if (memslot->dirty_bitmap == dirty_bitmap) + dirty_bitmap += n / sizeof(long); memset(dirty_bitmap, 0, n); r = -ENOMEM; slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL); - if (!slots) { - vfree(dirty_bitmap); + if (!slots) goto out; - } memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots)); slots->memslots[log->slot].dirty_bitmap = dirty_bitmap; @@ -3193,11 +3190,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, kfree(old_slots); r = -EFAULT; - if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { - vfree(dirty_bitmap); + if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) goto out; - } - vfree(dirty_bitmap); } else { r = -EFAULT; if (clear_user(log->dirty_bitmap, n)) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 0b89d00..7c956d8 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -119,6 +119,7 @@ struct kvm_memory_slot { unsigned long flags; unsigned long *rmap; unsigned long *dirty_bitmap; + unsigned long *dirty_bitmap_head; struct { unsigned long rmap_pde; int write_count; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 62ae13f..b15d1eb 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -447,8 +447,9 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot) if 
(!memslot->dirty_bitmap) return; - vfree(memslot->dirty_bitmap); + vfree(memslot->dirty_bitmap_head); memslot->dirty_bitmap = NULL; + memslot->dirty_bitmap_head = NULL; } /* @@ -535,15 +536,21 @@ static int kvm_vm_release(struct inode *inode, struct file *filp) return 0; } +/* + * Allocation size is twice as large as the actual dirty bitmap size. + * This makes it possible to do double buffering: see x86's + * kvm_vm_ioctl_get_dirty_log(). + */ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) { - unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot); + unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot); memslot->dirty_bitmap = vmalloc(dirty_bytes); if (!memslot->dirty_bitmap) return -ENOMEM; memset(memslot->dirty_bitmap, 0, dirty_bytes); + memslot->dirty_bitmap_head = memslot->dirty_bitmap; return 0; } -- 1.7.0.4 -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html