[kvm-unit-tests PATCH v2 1/6] lib/x86/vm: collection of improvements

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Ensure we're page aligned, add locking, just return if NULL is
passed to vfree().

Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
---
 lib/x86/asm/page.h |  2 ++
 lib/x86/vm.c       | 44 +++++++++++++++++++++++++++++++++++++-------
 2 files changed, 39 insertions(+), 7 deletions(-)

diff --git a/lib/x86/asm/page.h b/lib/x86/asm/page.h
index 5044a49ab0cc..dd999304f1f0 100644
--- a/lib/x86/asm/page.h
+++ b/lib/x86/asm/page.h
@@ -16,6 +16,8 @@
 
 #ifndef __ASSEMBLY__
 
+#define PAGE_ALIGN(addr)	ALIGN(addr, PAGE_SIZE)
+
 #ifdef __x86_64__
 #define LARGE_PAGE_SIZE	(512 * PAGE_SIZE)
 #else
diff --git a/lib/x86/vm.c b/lib/x86/vm.c
index f7e778b3779c..baea17e7f475 100644
--- a/lib/x86/vm.c
+++ b/lib/x86/vm.c
@@ -1,37 +1,56 @@
 #include "fwcfg.h"
 #include "vm.h"
 #include "libcflat.h"
+#include "asm/spinlock.h"
 
+static struct spinlock heap_lock;
+static struct spinlock vm_lock;
 static void *free = 0;
 static void *vfree_top = 0;
 
 static void free_memory(void *mem, unsigned long size)
 {
+    assert(!((unsigned long)mem & ~PAGE_MASK));
+
+    spin_lock(&heap_lock);
+
+    free = NULL;
+
     while (size >= PAGE_SIZE) {
 	*(void **)mem = free;
 	free = mem;
 	mem += PAGE_SIZE;
 	size -= PAGE_SIZE;
     }
+
+    spin_unlock(&heap_lock);
 }
 
 void *alloc_page()
 {
     void *p;
 
-    if (!free)
-	return 0;
+    spin_lock(&heap_lock);
+
+    /* Must drop the lock on the empty-heap path too, or every
+     * later allocator call deadlocks on heap_lock. */
+    if (!free) {
+	spin_unlock(&heap_lock);
+	return NULL;
+    }
 
     p = free;
     free = *(void **)free;
 
+    spin_unlock(&heap_lock);
+
     return p;
 }
 
 void free_page(void *page)
 {
+    spin_lock(&heap_lock);
     *(void **)page = free;
     free = page;
+    spin_unlock(&heap_lock);
 }
 
 extern char edata;
@@ -162,11 +179,13 @@ void *vmalloc(unsigned long size)
     void *mem, *p;
     unsigned pages;
 
-    size += sizeof(unsigned long);
+    size = PAGE_ALIGN(size + sizeof(unsigned long));
 
-    size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
+    spin_lock(&vm_lock);
     vfree_top -= size;
     mem = p = vfree_top;
+    spin_unlock(&vm_lock);
+
     pages = size / PAGE_SIZE;
     while (pages--) {
 	install_page(phys_to_virt(read_cr3()), virt_to_phys(alloc_page()), p);
@@ -179,12 +198,18 @@ void *vmalloc(unsigned long size)
 
 uint64_t virt_to_phys_cr3(void *mem)
 {
-    return (*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK) + ((ulong)mem & (PAGE_SIZE - 1));
+    return (*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK) + ((ulong)mem & ~PAGE_MASK);
 }
 
 void vfree(void *mem)
 {
-    unsigned long size = ((unsigned long *)mem)[-1];
+    unsigned long size;
+
+    if (mem == NULL)
+	return;
+
+    mem -= sizeof(unsigned long);
+    size = *(unsigned long *)mem;
 
     while (size) {
 	free_page(phys_to_virt(*get_pte(phys_to_virt(read_cr3()), mem) & PT_ADDR_MASK));
@@ -198,11 +223,14 @@ void *vmap(unsigned long long phys, unsigned long size)
     void *mem, *p;
     unsigned pages;
 
-    size = (size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
-    vfree_top -= size;
+    size = PAGE_ALIGN(size);
     phys &= ~(unsigned long long)(PAGE_SIZE - 1);
 
+    spin_lock(&vm_lock);
+    vfree_top -= size;
     mem = p = vfree_top;
+    spin_unlock(&vm_lock);
+
     pages = size / PAGE_SIZE;
     while (pages--) {
 	install_page(phys_to_virt(read_cr3()), phys, p);
@@ -214,7 +242,12 @@ void *vmap(unsigned long long phys, unsigned long size)
 
 void *alloc_vpages(ulong nr)
 {
-	vfree_top -= PAGE_SIZE * nr;
-	return vfree_top;
+	void *p;
+
+	/* Snapshot vfree_top while still holding the lock; reading it
+	 * after the unlock could return a range claimed by a racing
+	 * allocation. */
+	spin_lock(&vm_lock);
+	vfree_top -= PAGE_SIZE * nr;
+	p = vfree_top;
+	spin_unlock(&vm_lock);
+	return p;
 }
 
-- 
2.7.4

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html



[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux