[PATCH v7 04/14] Introduce lib/alloc

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



alloc supplies three ingredients to the test framework that are all
related to the support of dynamic memory allocation.

The first is a set of alloc function wrappers for malloc and its
friends. Using wrappers allows test code and common code to use the
same interface for memory allocation at all stages, even though the
implementations may change with the stage, e.g. pre/post paging.

The second is a set of implementations for the alloc function
interfaces. These implementations are named early_*, as they can be
used almost immediately by the test framework.

The third is a very simple physical memory allocator, which the
early_* alloc functions build on.

Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
---
v7: expanded from only supplying the alloc function wrappers to
    including early_* and phys_alloc [Paolo Bonzini]
---
 lib/alloc.c | 176 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/alloc.h | 123 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 299 insertions(+)
 create mode 100644 lib/alloc.c
 create mode 100644 lib/alloc.h

diff --git a/lib/alloc.c b/lib/alloc.c
new file mode 100644
index 0000000000000..5d55e285dcd1d
--- /dev/null
+++ b/lib/alloc.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include "alloc.h"
+#include "asm/spinlock.h"
+#include "asm/io.h"
+
+/* Round @x up to the next multiple implied by @mask (mask = alignment - 1). */
+#define ALIGN_UP_MASK(x, mask)	(((x) + (mask)) & ~(mask))
+/* Round @x up to alignment @a; @a must be a power of two. */
+#define ALIGN_UP(x, a)		ALIGN_UP_MASK(x, (typeof(x))(a) - 1)
+#define MIN(a, b)		((a) < (b) ? (a) : (b))
+#define MAX(a, b)		((a) > (b) ? (a) : (b))
+
+/* Maximum number of allocations that can be recorded for phys_alloc_show. */
+#define PHYS_ALLOC_NR_REGIONS	256
+
+/* One allocated region: [base, base + size). */
+struct phys_alloc_region {
+	phys_addr_t base;
+	phys_addr_t size;
+};
+
+/* Log of allocations, used only for reporting by phys_alloc_show. */
+static struct phys_alloc_region regions[PHYS_ALLOC_NR_REGIONS];
+static int nr_regions;
+
+/* @lock protects all allocator state below. @base and @top bound the
+ * remaining free memory; @align_min is the minimum allocation alignment. */
+static struct spinlock lock;
+static phys_addr_t base, top, align_min;
+
+/*
+ * phys_alloc_show outputs all currently allocated regions, followed by
+ * the remaining free region, in the format
+ *   <start_addr>-<end_addr> [<USED|FREE>]
+ *
+ * phys_addr_t may be u32 (PHYS32) or u64; explicitly widen every value
+ * to u64 so it matches the %llx conversion on all configurations
+ * (passing a u32 to %llx is undefined behavior).
+ */
+void phys_alloc_show(void)
+{
+	int i;
+
+	spin_lock(&lock);
+	printf("phys_alloc minimum alignment: 0x%llx\n", (u64)align_min);
+	for (i = 0; i < nr_regions; ++i)
+		printf("%016llx-%016llx [%s]\n",
+			(u64)regions[i].base,
+			(u64)(regions[i].base + regions[i].size - 1),
+			"USED");
+	printf("%016llx-%016llx [%s]\n", (u64)base, (u64)(top - 1), "FREE");
+	spin_unlock(&lock);
+}
+
+/*
+ * phys_alloc_init makes [base_addr, base_addr + size) the free region
+ * and resets the minimum alignment to DEFAULT_MINIMUM_ALIGNMENT.
+ */
+void phys_alloc_init(phys_addr_t base_addr, phys_addr_t size)
+{
+	spin_lock(&lock);
+	base = base_addr;
+	top = base_addr + size;
+	align_min = DEFAULT_MINIMUM_ALIGNMENT;
+	spin_unlock(&lock);
+}
+
+/*
+ * phys_alloc_set_minimum_alignment sets the minimum alignment applied
+ * to all subsequent allocations. @align must be a nonzero power of two.
+ */
+void phys_alloc_set_minimum_alignment(phys_addr_t align)
+{
+	/* Reject zero and non-power-of-two alignments. */
+	assert(align && !(align & (align - 1)));
+
+	spin_lock(&lock);
+	align_min = align;
+	spin_unlock(&lock);
+}
+
+/*
+ * Allocate @size bytes aligned to max(@align, align_min), or return
+ * INVALID_PHYS_ADDR if there isn't enough free memory. With @safe, on
+ * 32-bit builds the allocation is restricted to the first 4 GiB so the
+ * result is usable before paging is enabled.
+ *
+ * Note: @top, @align_min and @base are shared allocator state, so they
+ * must only be read/written with the lock held. (The original code
+ * computed top_safe and align from them before taking the lock, which
+ * raced with phys_alloc_init/phys_alloc_set_minimum_alignment.)
+ */
+static phys_addr_t phys_alloc_aligned_safe(phys_addr_t size,
+					   phys_addr_t align, bool safe)
+{
+	phys_addr_t addr, size_orig = size;
+	u64 top_safe;
+
+	spin_lock(&lock);
+
+	top_safe = top;
+
+	if (safe && sizeof(long) == 4)
+		top_safe = MIN(top_safe, 1ULL << 32);
+
+	align = MAX(align, align_min);
+
+	addr = ALIGN_UP(base, align);
+	/* Account for the padding consumed by aligning @base up. */
+	size += addr - base;
+
+	if ((top_safe - base) < size) {
+		printf("%s: requested=0x%llx (align=0x%llx), "
+		       "need=0x%llx, but free=0x%llx. "
+		       "top=0x%llx, top_safe=0x%llx\n", __func__,
+		       size_orig, align, size, top_safe - base,
+		       top, top_safe);
+		spin_unlock(&lock);
+		return INVALID_PHYS_ADDR;
+	}
+
+	base += size;
+
+	/* Log the allocation (requested size only) for phys_alloc_show. */
+	if (nr_regions < PHYS_ALLOC_NR_REGIONS) {
+		regions[nr_regions].base = addr;
+		regions[nr_regions].size = size_orig;
+		++nr_regions;
+	} else {
+		printf("%s: WARNING: no free log entries, "
+		       "can't log allocation...\n", __func__);
+	}
+
+	spin_unlock(&lock);
+
+	return addr;
+}
+
+/*
+ * Like phys_alloc_aligned_safe, but zeros the allocated memory before
+ * returning its base address.
+ */
+static phys_addr_t phys_zalloc_aligned_safe(phys_addr_t size,
+					    phys_addr_t align, bool safe)
+{
+	phys_addr_t base_addr = phys_alloc_aligned_safe(size, align, safe);
+
+	/* Only touch the memory when the allocation succeeded. */
+	if (base_addr != INVALID_PHYS_ADDR)
+		memset(phys_to_virt(base_addr), 0, size);
+
+	return base_addr;
+}
+
+/* Allocate @size bytes aligned to @align; no 4G restriction (safe=false). */
+phys_addr_t phys_alloc_aligned(phys_addr_t size, phys_addr_t align)
+{
+	return phys_alloc_aligned_safe(size, align, false);
+}
+
+/* Like phys_alloc_aligned, but zeros the memory before returning. */
+phys_addr_t phys_zalloc_aligned(phys_addr_t size, phys_addr_t align)
+{
+	return phys_zalloc_aligned_safe(size, align, false);
+}
+
+/* Allocate @size bytes with the allocator's minimum alignment. */
+phys_addr_t phys_alloc(phys_addr_t size)
+{
+	return phys_alloc_aligned_safe(size, align_min, false);
+}
+
+/* Like phys_alloc, but zeros the memory before returning. */
+phys_addr_t phys_zalloc(phys_addr_t size)
+{
+	return phys_zalloc_aligned_safe(size, align_min, false);
+}
+
+/*
+ * malloc implementation for the early allocator: allocate @size bytes
+ * (safe=true, so usable before paging) and return a virtual pointer,
+ * or NULL on failure.
+ */
+static void *early_malloc(size_t size)
+{
+	phys_addr_t pa = phys_alloc_aligned_safe(size, align_min, true);
+
+	return pa == INVALID_PHYS_ADDR ? NULL : phys_to_virt(pa);
+}
+
+/*
+ * calloc implementation for the early allocator: allocate a zeroed
+ * array of @nmemb elements of @size bytes each, or return NULL on
+ * failure. Like standard calloc, fail cleanly if nmemb * size would
+ * overflow size_t instead of silently allocating a truncated size.
+ */
+static void *early_calloc(size_t nmemb, size_t size)
+{
+	phys_addr_t addr;
+
+	if (size && nmemb > (size_t)-1 / size)
+		return NULL;
+
+	addr = phys_zalloc_aligned_safe(nmemb * size, align_min, true);
+	if (addr == INVALID_PHYS_ADDR)
+		return NULL;
+
+	return phys_to_virt(addr);
+}
+
+/*
+ * free implementation for the early allocator: a nop, since phys_alloc
+ * regions can never be reclaimed (see the note in alloc.h).
+ */
+static void early_free(void *ptr __unused)
+{
+}
+
+/*
+ * memalign implementation for the early allocator: allocate @size bytes
+ * aligned to @alignment (which must be a nonzero power of two), or
+ * return NULL on failure.
+ */
+static void *early_memalign(size_t alignment, size_t size)
+{
+	phys_addr_t pa;
+
+	assert(alignment && !(alignment & (alignment - 1)));
+
+	pa = phys_alloc_aligned_safe(size, alignment, true);
+
+	return pa == INVALID_PHYS_ADDR ? NULL : phys_to_virt(pa);
+}
+
+/* The early_* implementations bundled as an alloc_ops instance. */
+static struct alloc_ops early_alloc_ops = {
+	.malloc = early_malloc,
+	.calloc = early_calloc,
+	.free = early_free,
+	.memalign = early_memalign,
+};
+
+/* Current allocator backend; starts as the early allocator and may be
+ * repointed later, e.g. once paging is enabled. */
+struct alloc_ops *alloc_ops = &early_alloc_ops;
diff --git a/lib/alloc.h b/lib/alloc.h
new file mode 100644
index 0000000000000..7a73c18bef97c
--- /dev/null
+++ b/lib/alloc.h
@@ -0,0 +1,123 @@
+#ifndef _ALLOC_H_
+#define _ALLOC_H_
+/*
+ * alloc supplies three ingredients to the test framework that are all
+ * related to the support of dynamic memory allocation.
+ *
+ * The first is a set of alloc function wrappers for malloc and its
+ * friends. Using wrappers allows test code and common code to use the
+ * same interface for memory allocation at all stages, even though the
+ * implementations may change with the stage, e.g. pre/post paging.
+ *
+ * The second is a set of implementations for the alloc function
+ * interfaces. These implementations are named early_*, as they can be
+ * used almost immediately by the test framework.
+ *
+ * The third is a very simple physical memory allocator, which the
+ * early_* alloc functions build on.
+ *
+ * Copyright (C) 2014, Red Hat Inc, Andrew Jones <drjones@xxxxxxxxxx>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.
+ */
+#include "libcflat.h"
+
+/*
+ * alloc_ops is the backend interface behind the malloc/calloc/free/
+ * memalign wrappers below; swapping the alloc_ops pointer switches the
+ * implementation without changing callers.
+ */
+struct alloc_ops {
+	void *(*malloc)(size_t size);
+	void *(*calloc)(size_t nmemb, size_t size);
+	void (*free)(void *ptr);
+	void *(*memalign)(size_t alignment, size_t size);
+};
+
+/*
+ * alloc_ops is initialized to early_alloc_ops
+ */
+extern struct alloc_ops *alloc_ops;
+
+/* Allocate @size bytes via the current alloc_ops backend. */
+static inline void *malloc(size_t size)
+{
+	assert(alloc_ops && alloc_ops->malloc);
+	return alloc_ops->malloc(size);
+}
+
+/* Allocate a zeroed @nmemb x @size array via the current alloc_ops backend. */
+static inline void *calloc(size_t nmemb, size_t size)
+{
+	assert(alloc_ops && alloc_ops->calloc);
+	return alloc_ops->calloc(nmemb, size);
+}
+
+/* Free @ptr via the current alloc_ops backend (may be a nop). */
+static inline void free(void *ptr)
+{
+	assert(alloc_ops && alloc_ops->free);
+	alloc_ops->free(ptr);
+}
+
+/* Allocate @size bytes aligned to @alignment via the current backend. */
+static inline void *memalign(size_t alignment, size_t size)
+{
+	assert(alloc_ops && alloc_ops->memalign);
+	return alloc_ops->memalign(alignment, size);
+}
+
+/*
+ * phys_addr_t holds a physical address; define PHYS32 on targets whose
+ * physical addresses fit in 32 bits. INVALID_PHYS_ADDR (all ones) is
+ * the allocation-failure sentinel.
+ */
+#ifdef PHYS32
+typedef u32 phys_addr_t;
+#else
+typedef u64 phys_addr_t;
+#endif
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
+
+/*
+ * phys_alloc is a very simple allocator which allows physical memory
+ * to be partitioned into regions until all memory is allocated.
+ *
+ * Note: This is such a simple allocator that there is no way to free
+ * a region. For more complicated memory management a single region
+ * can be allocated, but then have its memory managed by a more
+ * sophisticated allocator, e.g. a page allocator.
+ */
+#define DEFAULT_MINIMUM_ALIGNMENT 32
+
+/*
+ * phys_alloc_init creates the initial free memory region of size @size
+ * at @base. The minimum alignment is set to DEFAULT_MINIMUM_ALIGNMENT.
+ */
+extern void phys_alloc_init(phys_addr_t base, phys_addr_t size);
+
+/*
+ * phys_alloc_set_minimum_alignment sets the minimum alignment to
+ * @align.
+ */
+extern void phys_alloc_set_minimum_alignment(phys_addr_t align);
+
+/*
+ * phys_alloc_aligned returns the base address of a region of size @size,
+ * where the address is aligned to @align, or INVALID_PHYS_ADDR if there
+ * isn't enough free memory to satisfy the request.
+ */
+extern phys_addr_t phys_alloc_aligned(phys_addr_t size, phys_addr_t align);
+
+/*
+ * phys_zalloc_aligned is like phys_alloc_aligned, but zeros the memory
+ * before returning the address.
+ */
+extern phys_addr_t phys_zalloc_aligned(phys_addr_t size, phys_addr_t align);
+
+/*
+ * phys_alloc returns the base address of a region of size @size, or
+ * INVALID_PHYS_ADDR if there isn't enough free memory to satisfy the
+ * request.
+ */
+extern phys_addr_t phys_alloc(phys_addr_t size);
+
+/*
+ * phys_zalloc is like phys_alloc, but zeros the memory before returning.
+ */
+extern phys_addr_t phys_zalloc(phys_addr_t size);
+
+/*
+ * phys_alloc_show outputs all currently allocated regions with the
+ * following format
+ *   <start_addr>-<end_addr> [<USED|FREE>]
+ */
+extern void phys_alloc_show(void);
+
+#endif /* _ALLOC_H_ */
-- 
1.9.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html




[Index of Archives]     [KVM ARM]     [KVM ia64]     [KVM ppc]     [Virtualization Tools]     [Spice Development]     [Libvirt]     [Libvirt Users]     [Linux USB Devel]     [Linux Audio Users]     [Yosemite Questions]     [Linux Kernel]     [Linux SCSI]     [XFree86]
  Powered by Linux