Re: [PATCH RFC v2 5/6] KVM: moving dirty bitmaps to user space

On 04/20/2010 02:02 PM, Takuya Yoshikawa wrote:
We move dirty bitmaps to user space.

  - Allocation and destruction: we use do_mmap() and do_munmap().
    The new bitmap space is twice as long as the original one and we
    use the additional space for double buffering: this makes it
    possible to update the active bitmap while letting user space
    read the other one safely (see the sketch after this list).

  - Bitmap manipulations: we replace all functions which access dirty
    bitmaps with their *_user() counterparts. Note that some of these
    should be optimized later.

  - For ia64: moving the dirty bitmaps of memory slots does not affect
    ia64 much because it uses a separate space to store bitmaps which
    is updated directly: all we have to change are the sync and get of
    the dirty log, so we don't need a set_bit_user-like function for
    ia64.
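For reference, a minimal sketch of the double-buffered layout described in
the first item above (the field names follow the patch; the swap helper is
hypothetical and only illustrates the idea, it is not part of the diff below):

	/*
	 * Sketch only: one mmap()ed region of 2 * n bytes backs both
	 * bitmaps.  The active half collects new dirty bits while user
	 * space reads the other half; a get-dirty-log call would swap
	 * the two pointers.
	 */
	struct dirty_bitmap_pair {
		unsigned long __user *dirty_bitmap;	/* active half */
		unsigned long __user *dirty_bitmap_old;	/* half being read */
	};

	static void swap_dirty_bitmaps(struct dirty_bitmap_pair *p)
	{
		unsigned long __user *tmp = p->dirty_bitmap;

		p->dirty_bitmap = p->dirty_bitmap_old;
		p->dirty_bitmap_old = tmp;
	}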

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@xxxxxxxxxxxxx>
Signed-off-by: Fernando Luis Vazquez Cao <fernando@xxxxxxxxxxxxx>
---
  arch/ia64/kvm/kvm-ia64.c  |   12 ++++-
  arch/powerpc/kvm/book3s.c |    2 +-
  arch/x86/kvm/x86.c        |   24 +++++-----
  include/linux/kvm_host.h  |    5 +-
  virt/kvm/kvm_main.c       |  101 ++++++++++++++++++++++++++++++++++++++++-----
  5 files changed, 116 insertions(+), 28 deletions(-)

diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index d60dafe..c3f0b70 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1823,11 +1823,19 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
  	n = kvm_dirty_bitmap_bytes(memslot);
  	base = memslot->base_gfn / BITS_PER_LONG;

+	r = -EFAULT;
+	if (!access_ok(VERIFY_WRITE, memslot->dirty_bitmap, n))
+		goto out;
+
  	for (i = 0; i < n/sizeof(long); ++i) {
  		if (dirty_bitmap[base + i])
  			memslot->is_dirty = true;

-		memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
+		if (__put_user(dirty_bitmap[base + i],
+			&memslot->dirty_bitmap[i])) {
+			r = -EFAULT;
+			goto out;
+		}
  		dirty_bitmap[base + i] = 0;
  	}
  	r = 0;
@@ -1858,7 +1866,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
  	if (memslot->is_dirty) {
  		kvm_flush_remote_tlbs(kvm);
  		n = kvm_dirty_bitmap_bytes(memslot);
-		memset(memslot->dirty_bitmap, 0, n);
+		clear_user(memslot->dirty_bitmap, n);
  		memslot->is_dirty = false;

Does this need an error check?
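For illustration, a hedged sketch of such a check, assuming the function's
existing r/out error convention (clear_user() returns the number of bytes
it could not clear):

		if (clear_user(memslot->dirty_bitmap, n)) {
			/* Sketch: propagate the fault instead of ignoring it. */
			r = -EFAULT;
			goto out;
		}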


@@ -468,8 +480,12 @@ void kvm_free_physmem(struct kvm *kvm)
  	int i;
  	struct kvm_memslots *slots = kvm->memslots;

-	for (i = 0; i < slots->nmemslots; ++i)
+	for (i = 0; i < slots->nmemslots; ++i) {
+		/* We don't munmap dirty bitmaps by ourselves. */

Why not?  If we allocated them, we have to free them.
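If they are to be freed here, a minimal sketch might look like the following,
assuming kvm_free_physmem() still runs with the owning task's mm and reusing
the kvm_dirty_bitmap_bytes() helper from the patch:

		if (slots->memslots[i].dirty_bitmap) {
			unsigned long n = kvm_dirty_bitmap_bytes(&slots->memslots[i]);

			/* Sketch: release the 2 * n byte mapping set up at slot creation. */
			down_write(&current->mm->mmap_sem);
			do_munmap(current->mm,
				  (unsigned long)slots->memslots[i].dirty_bitmap,
				  2 * n);
			up_write(&current->mm->mmap_sem);
		}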

+		slots->memslots[i].dirty_bitmap = NULL;
+		slots->memslots[i].dirty_bitmap_old = NULL;
  		kvm_free_physmem_slot(&slots->memslots[i], NULL);
+	}

  	kfree(kvm->memslots);
  }
@@ -523,13 +539,22 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)

  static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
  {
-	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
+	unsigned long user_addr;
+	unsigned long n = kvm_dirty_bitmap_bytes(memslot);

-	memslot->dirty_bitmap = vmalloc(dirty_bytes);
-	if (!memslot->dirty_bitmap)
-		return -ENOMEM;
+	down_write(&current->mm->mmap_sem);
+	user_addr = do_mmap(NULL, 0, 2 * n,
+			    PROT_READ | PROT_WRITE,
+			    MAP_PRIVATE | MAP_ANONYMOUS, 0);
+	up_write(&current->mm->mmap_sem);
+
+	if (IS_ERR((void *)user_addr))
+		return PTR_ERR((void *)user_addr);
+
+	memslot->dirty_bitmap = (unsigned long __user *)user_addr;
+	memslot->dirty_bitmap_old = (unsigned long __user *)(user_addr + n);
+	clear_user(memslot->dirty_bitmap, 2 * n);

This clear_user() needs an error check as well.
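For example, a hedged sketch of the missing check, undoing the freshly
created mapping on failure:

	if (clear_user(memslot->dirty_bitmap, 2 * n)) {
		/* Sketch: tear down the mapping we just created. */
		down_write(&current->mm->mmap_sem);
		do_munmap(current->mm, user_addr, 2 * n);
		up_write(&current->mm->mmap_sem);
		memslot->dirty_bitmap = NULL;
		memslot->dirty_bitmap_old = NULL;
		return -EFAULT;
	}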


-	memset(memslot->dirty_bitmap, 0, dirty_bytes);
  	return 0;
  }

@@ -778,13 +803,45 @@ int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
  }

  int kvm_copy_dirty_bitmap(unsigned long __user *to,
-			  const unsigned long *from,
+			  const unsigned long __user *from,
  			  unsigned long bytes)
  {
-	if (copy_to_user(to, from, bytes))
+#if defined(CONFIG_X86_64) || defined(CONFIG_PPC64) || defined(CONFIG_IA64)
+	if (copy_in_user(to, from, bytes)) {
+		printk(KERN_WARNING "%s: copy_in_user failed.\n", __func__);
  		return -EFAULT;
+	}
+	return 0;
+#else
+	int num, bufbytes;
+	unsigned long buf[32];

+	if (!access_ok(VERIFY_READ, from, bytes) ||
+	    !access_ok(VERIFY_WRITE, to, bytes)) {
+		goto out_fault;
+	}
+
+	bufbytes = sizeof(buf);
+	num = bufbytes / sizeof(buf[0]);
+
+	for (; bytes > bufbytes; bytes -= bufbytes, to += num, from += num) {
+		if (__copy_from_user(buf, from, bufbytes))
+			goto out_fault;
+		if (__copy_to_user(to, buf, bufbytes))
+			goto out_fault;
+	}
+	if (bytes > 0) {
+		if (__copy_from_user(buf, from, bytes))
+			goto out_fault;
+		if (__copy_to_user(to, buf, bytes))
+			goto out_fault;
+	}
  	return 0;
+
+out_fault:
+	printk(KERN_WARNING "%s: copy to(from) user failed.\n", __func__);
+	return -EFAULT;
+#endif
  }

This really wants to be hidden in lib/.
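For illustration, the bounce-buffer fallback could sit behind a generic
helper, e.g. something like the following (the name and its placement in
lib/ are hypothetical, not an existing API):

	/*
	 * Sketch of a copy_in_user() fallback for architectures without
	 * a native implementation: copy through a small kernel buffer.
	 */
	int generic_copy_in_user(void __user *to, const void __user *from,
				 unsigned long n)
	{
		unsigned long buf[32];

		if (!access_ok(VERIFY_READ, from, n) ||
		    !access_ok(VERIFY_WRITE, to, n))
			return -EFAULT;

		while (n) {
			unsigned long chunk = min_t(unsigned long, n, sizeof(buf));

			if (__copy_from_user(buf, from, chunk) ||
			    __copy_to_user(to, buf, chunk))
				return -EFAULT;

			to += chunk;
			from += chunk;
			n -= chunk;
		}
		return 0;
	}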


  int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
@@ -1194,13 +1251,35 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
  }
  EXPORT_SYMBOL_GPL(kvm_clear_guest);

+/*
+ * Please use generic *_user bitops once they become available.
+ * Be careful: setting the bit won't be done atomically.
+ */

Please introduce the user bitops as part of this patchset.
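A minimal, non-atomic sketch of one such helper (the name is hypothetical;
get_user()/put_user() perform their own access_ok() checks):

	static int set_bit_user_non_atomic(int nr, void __user *addr)
	{
		u8 __user *p = (u8 __user *)addr + nr / BITS_PER_BYTE;
		u8 val;

		if (get_user(val, p))
			return -EFAULT;

		val |= 1U << (nr % BITS_PER_BYTE);
		if (put_user(val, p))
			return -EFAULT;

		return 0;
	}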

--
error compiling committee.c: too many arguments to function
