[PATCH kvm-unit-tests 08/10] alloc: implement free

Track the block size in lib/alloc.c rather than in the alloc_ops
implementations.  The generic code stores the size (and the slack in front
of the returned pointer) as metadata ahead of each block, so
alloc_ops->free receives the size as an argument instead of having to
track it itself.

Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
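To make the metadata layout easier to review: lib/alloc.c now stores two
uintptr_t words immediately below the pointer it returns -- the slack
between the backend's block and the returned pointer, and the total block
size -- so free() can recompute the block's start and size and pass both
to alloc_ops->free().

As a rough illustration of what a backend has to provide under the
extended alloc_ops interface, here is a minimal sketch; the sketch_* names
are hypothetical and not part of this patch (lib/alloc_phys.c is the real
in-tree user):

	#include "libcflat.h"
	#include "alloc.h"

	/* Toy bump allocator backed by a static pool; it never actually
	 * reclaims memory, which is fine because lib/alloc.c only calls
	 * ->free if the backend provides it.
	 */
	static char sketch_pool[4096];
	static size_t sketch_used;

	static void *sketch_memalign(size_t alignment, size_t size)
	{
		/* assumes power-of-two alignment; bounds checks omitted */
		uintptr_t addr = (uintptr_t)sketch_pool + sketch_used;

		addr = (addr + alignment - 1) & ~(uintptr_t)(alignment - 1);
		sketch_used = addr + size - (uintptr_t)sketch_pool;
		return (void *)addr;
	}

	static void sketch_free(void *ptr, size_t size)
	{
		/* ptr/size describe the whole block handed out by
		 * ->memalign, metadata included, so the backend never has
		 * to remember per-allocation sizes itself.
		 */
		(void)ptr;
		(void)size;
	}

	static struct alloc_ops sketch_ops = {
		.memalign  = sketch_memalign,
		.free      = sketch_free,
		.align_min = sizeof(long),
	};

Installing it is just "alloc_ops = &sketch_ops;", after which malloc(),
calloc(), memalign() and free() route through these hooks.
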
 lib/alloc.c      | 69 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/alloc.h      | 33 ++++++---------------------
 lib/alloc_phys.c | 23 +++++++++----------
 lib/vmalloc.h    |  1 +
 4 files changed, 88 insertions(+), 38 deletions(-)
 create mode 100644 lib/alloc.c

diff --git a/lib/alloc.c b/lib/alloc.c
new file mode 100644
index 0000000..7ce61d2
--- /dev/null
+++ b/lib/alloc.c
@@ -0,0 +1,69 @@
+#include "alloc.h"
+
+void *malloc(size_t size)
+{
+	size = (size + sizeof(long) - 1) & -sizeof(long);
+	return memalign(sizeof(long), size);
+}
+
+void *calloc(size_t nmemb, size_t size)
+{
+	void *ptr = malloc(nmemb * size);
+	if (ptr)
+		memset(ptr, 0, nmemb * size);
+	return ptr;
+}
+
+#define METADATA_EXTRA	(2 * sizeof(uintptr_t))
+#define OFS_SLACK	(-2 * (long)sizeof(uintptr_t))
+#define OFS_SIZE	(-(long)sizeof(uintptr_t))
+
+static inline void *block_begin(void *mem)
+{
+	uintptr_t slack = *(uintptr_t *)(mem + OFS_SLACK);
+	return mem - slack;
+}
+
+static inline uintptr_t block_size(void *mem)
+{
+	return *(uintptr_t *)(mem + OFS_SIZE);
+}
+
+void free(void *ptr)
+{
+	if (!ptr || !alloc_ops->free)
+		return;
+
+	uintptr_t base = (uintptr_t) block_begin(ptr);
+	uintptr_t sz = block_size(ptr);
+
+	alloc_ops->free((void *) base, sz);
+}
+
+void *memalign(size_t alignment, size_t size)
+{
+	void *p;
+	uintptr_t blkalign;
+	uintptr_t mem, slack;
+
+	assert(alloc_ops && alloc_ops->memalign);
+	if (alignment <= sizeof(uintptr_t))
+		alignment = sizeof(uintptr_t);
+	else
+		size += alignment - 1;
+
+	blkalign = MAX(alignment, alloc_ops->align_min);
+	p = alloc_ops->memalign(blkalign, size + METADATA_EXTRA);
+
+	/* Leave room for metadata before aligning the result.  */
+	mem = (uintptr_t)p + METADATA_EXTRA;
+	mem = ALIGN(mem, alignment);
+	slack = mem - (uintptr_t)p;
+	size = ALIGN(mem + size - 1, alloc_ops->align_min) - mem;
+
+	/* Write the metadata */
+	*(uintptr_t *)(mem + OFS_SLACK) = slack;
+	*(uintptr_t *)(mem + OFS_SIZE) = size + slack;
+	return (void *)mem;
+}
+
diff --git a/lib/alloc.h b/lib/alloc.h
index 24f85b4..f560c4a 100644
--- a/lib/alloc.h
+++ b/lib/alloc.h
@@ -24,36 +24,17 @@
 
 struct alloc_ops {
 	void *(*memalign)(size_t alignment, size_t size);
+	void (*free)(void *ptr, size_t size);
+	size_t align_min;
 };
 
 extern struct alloc_ops *alloc_ops;
 
-/*
- * Our malloc implementation is currently so simple that it can just
- * be inlined. :)
- */
-static inline void *malloc(size_t size)
-{
-	assert(alloc_ops && alloc_ops->memalign);
-	return alloc_ops->memalign(sizeof(long), size);
-}
-
-static inline void *calloc(size_t nmemb, size_t size)
-{
-	void *ptr = malloc(nmemb * size);
-	if (ptr)
-		memset(ptr, 0, nmemb * size);
-	return ptr;
-}
+void *malloc(size_t size);
+void *calloc(size_t nmemb, size_t size);
+void free(void *ptr);
+void *memalign(size_t alignment, size_t size);
 
-static inline void free(void *ptr)
-{
-}
-
-static inline void *memalign(size_t alignment, size_t size)
-{
-	assert(alloc_ops && alloc_ops->memalign);
-	return alloc_ops->memalign(alignment, size);
-}
+extern struct alloc_ops *alloc_ops;
 
 #endif /* _ALLOC_H_ */
diff --git a/lib/alloc_phys.c b/lib/alloc_phys.c
index 6befb5c..c570f6e 100644
--- a/lib/alloc_phys.c
+++ b/lib/alloc_phys.c
@@ -42,16 +42,6 @@ void phys_alloc_show(void)
 	spin_unlock(&lock);
 }
 
-void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
-{
-	spin_lock(&lock);
-	base = base_addr;
-	top = base + size;
-	align_min = DEFAULT_MINIMUM_ALIGNMENT;
-	nr_regions = 0;
-	spin_unlock(&lock);
-}
-
 void phys_alloc_set_minimum_alignment(phys_addr_t align)
 {
 	assert(align && !(align & (align - 1)));
@@ -74,8 +64,6 @@ static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
 	if (safe && sizeof(long) == 4)
 		top_safe = MIN(top_safe, 1ULL << 32);
 
-	align = MAX(align, align_min);
-
 	addr = ALIGN(base, align);
 	size += addr - base;
 
@@ -131,3 +119,14 @@ static struct alloc_ops early_alloc_ops = {
 };
 
 struct alloc_ops *alloc_ops = &early_alloc_ops;
+
+void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
+{
+	spin_lock(&lock);
+	base = base_addr;
+	top = base + size;
+	align_min = DEFAULT_MINIMUM_ALIGNMENT;
+	early_alloc_ops.align_min = align_min;
+	nr_regions = 0;
+	spin_unlock(&lock);
+}
diff --git a/lib/vmalloc.h b/lib/vmalloc.h
index 2060f3a..d4dd880 100644
--- a/lib/vmalloc.h
+++ b/lib/vmalloc.h
@@ -4,5 +4,6 @@
 void *alloc_vpages(ulong nr);
 void *alloc_vpage(void);
 void init_alloc_vpage(void *top);
+void setup_vm(void);
 
 #endif
-- 
2.14.2