Now that mmu_lock is held only inside kvm_mmu_write_protect_pt_masked(), we
can use __put_user() to copy each 64/32-bit word of dirty bits directly to
user space.  This eliminates the need to copy the whole bitmap into an extra
buffer, and the resulting code is much more cache friendly than before.

Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@xxxxxxxxxxxxx>
Cc: Xiao Guangrong <xiaoguangrong@xxxxxxxxxxxxxxxxxx>
---
 arch/x86/kvm/x86.c  |   18 ++++++++----------
 virt/kvm/kvm_main.c |    6 +-----
 2 files changed, 9 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1d1f6df..79e8ad0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3522,7 +3522,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	struct kvm_memory_slot *memslot;
 	unsigned long n, i;
 	unsigned long *dirty_bitmap;
-	unsigned long *dirty_bitmap_buffer;
+	unsigned long __user *p_user;
 	bool is_dirty = false;
 
 	mutex_lock(&kvm->slots_lock);
@@ -3539,11 +3539,12 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 		goto out;
 
 	n = kvm_dirty_bitmap_bytes(memslot);
+	r = -EFAULT;
+	if (clear_user(log->dirty_bitmap, n))
+		goto out;
 
-	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
-	memset(dirty_bitmap_buffer, 0, n);
-
-	for (i = 0; i < n / sizeof(long); i++) {
+	p_user = (unsigned long __user *)log->dirty_bitmap;
+	for (i = 0; i < n / sizeof(long); i++, p_user++) {
 		unsigned long mask;
 		gfn_t offset;
 
@@ -3553,7 +3554,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 		is_dirty = true;
 
 		mask = xchg(&dirty_bitmap[i], 0);
-		dirty_bitmap_buffer[i] = mask;
+		if (__put_user(mask, p_user))
+			goto out;
 
 		offset = i * BITS_PER_LONG;
 		kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
@@ -3561,10 +3563,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	if (is_dirty)
 		kvm_flush_remote_tlbs(kvm);
 
-	r = -EFAULT;
-	if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
-		goto out;
-
 	r = 0;
out:
 	mutex_unlock(&kvm->slots_lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index bf040c4..c919f58 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -626,14 +626,10 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-/*
- * Allocation size is twice as large as the actual dirty bitmap size.
- * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed.
- */
 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
 #ifndef CONFIG_S390
-	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
+	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
 
 	memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
 	if (!memslot->dirty_bitmap)
-- 
1.7.9.5
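
P.S. For readers following along, here is a minimal standalone sketch of the
copy pattern the patch switches to (not part of the patch; the helper name
write_dirty_words_to_user() is made up for illustration).  clear_user()
performs the access check and zero-fills the whole user buffer once, so each
subsequent non-zero word can be stored with the unchecked __put_user():

#include <linux/atomic.h>	/* xchg() */
#include <linux/uaccess.h>	/* clear_user(), __put_user() */

static int write_dirty_words_to_user(unsigned long *dirty_bitmap,
				     unsigned long __user *dst,
				     unsigned long bytes)
{
	unsigned long i;

	/* access_ok() check + zero fill of the whole user buffer up front */
	if (clear_user(dst, bytes))
		return -EFAULT;

	for (i = 0; i < bytes / sizeof(long); i++, dst++) {
		/* atomically fetch and clear one word of dirty bits */
		unsigned long mask = xchg(&dirty_bitmap[i], 0);

		if (!mask)
			continue;	/* user side already zeroed */

		/* unchecked store: clear_user() above did the access check */
		if (__put_user(mask, dst))
			return -EFAULT;
	}

	return 0;
}

Because the words are written one at a time as the kernel-side bitmap is
walked, no second buffer of the bitmap's size is needed, which is why the
2x allocation in kvm_create_dirty_bitmap() can be dropped.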