This will allow other arches to use {alloc,free}_page.

Signed-off-by: Andrew Jones <drjones@xxxxxxxxxx>
---
 lib/alloc.c         | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/alloc.h         | 10 ++++++++++
 lib/x86/vm.c        | 48 ++----------------------------------------------
 x86/Makefile.common |  1 +
 4 files changed, 65 insertions(+), 46 deletions(-)

diff --git a/lib/alloc.c b/lib/alloc.c
index 1d990a803825..ce1198e2977f 100644
--- a/lib/alloc.c
+++ b/lib/alloc.c
@@ -5,6 +5,7 @@
  */
 #include "alloc.h"
 #include "asm/spinlock.h"
+#include "asm/page.h"
 #include "asm/io.h"
 
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
@@ -150,6 +151,57 @@ phys_addr_t phys_zalloc(phys_addr_t size)
 	return phys_zalloc_aligned(size, phys_alloc_align_min);
 }
 
+static struct spinlock heap_lock;
+static void *heap_free_head;
+
+void heap_init(void *start, size_t size)
+{
+	void *p = start;
+
+	assert(!((unsigned long)start & ~PAGE_MASK));
+
+	spin_lock(&heap_lock);
+
+	heap_free_head = NULL;
+
+	while (size >= PAGE_SIZE) {
+		*(void **)p = heap_free_head;
+		heap_free_head = p;
+		p += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	spin_unlock(&heap_lock);
+}
+
+void *alloc_page(void)
+{
+	void *p;
+
+	spin_lock(&heap_lock);
+
+	if (!heap_free_head) {
+		/* don't return with the lock held */
+		spin_unlock(&heap_lock);
+		return NULL;
+	}
+
+	p = heap_free_head;
+	heap_free_head = *(void **)heap_free_head;
+
+	spin_unlock(&heap_lock);
+
+	return p;
+}
+
+void free_page(void *page)
+{
+	spin_lock(&heap_lock);
+	*(void **)page = heap_free_head;
+	heap_free_head = page;
+	spin_unlock(&heap_lock);
+}
+
 static void *early_malloc(size_t size)
 {
 	phys_addr_t addr = phys_alloc_aligned_safe(size,
diff --git a/lib/alloc.h b/lib/alloc.h
index bd3c4e8ff3f6..a37330b3088a 100644
--- a/lib/alloc.h
+++ b/lib/alloc.h
@@ -118,4 +118,14 @@ extern phys_addr_t phys_zalloc(phys_addr_t size);
  */
 extern void phys_alloc_show(void);
 
+/*
+ * Heap page allocator
+ *
+ * After initializing with heap_init, {alloc,free}_page can be used
+ * to easily manage pages.
+ */
+extern void heap_init(void *start, size_t size);
+extern void *alloc_page(void);
+extern void free_page(void *page);
+
 #endif /* _ALLOC_H_ */
diff --git a/lib/x86/vm.c b/lib/x86/vm.c
index baea17e7f475..4e399f80dd31 100644
--- a/lib/x86/vm.c
+++ b/lib/x86/vm.c
@@ -1,56 +1,12 @@
 #include "fwcfg.h"
 #include "vm.h"
 #include "libcflat.h"
+#include "alloc.h"
 #include "asm/spinlock.h"
 
-static struct spinlock heap_lock;
 static struct spinlock vm_lock;
-static void *free = 0;
 static void *vfree_top = 0;
 
-static void free_memory(void *mem, unsigned long size)
-{
-	assert(!((unsigned long)mem & ~PAGE_MASK));
-
-	spin_lock(&heap_lock);
-
-	free = NULL;
-
-	while (size >= PAGE_SIZE) {
-		*(void **)mem = free;
-		free = mem;
-		mem += PAGE_SIZE;
-		size -= PAGE_SIZE;
-	}
-
-	spin_unlock(&heap_lock);
-}
-
-void *alloc_page()
-{
-	void *p;
-
-	spin_lock(&heap_lock);
-
-	if (!free)
-		return NULL;
-
-	p = free;
-	free = *(void **)free;
-
-	spin_unlock(&heap_lock);
-
-	return p;
-}
-
-void free_page(void *page)
-{
-	spin_lock(&heap_lock);
-	*(void **)page = free;
-	free = page;
-	spin_unlock(&heap_lock);
-}
-
 extern char edata;
 static unsigned long end_of_memory;
 
@@ -170,7 +126,7 @@ void setup_vm()
 {
 	assert(!end_of_memory);
 	end_of_memory = fwcfg_get_u64(FW_CFG_RAM_SIZE);
-	free_memory(&edata, end_of_memory - (unsigned long)&edata);
+	heap_init(&edata, end_of_memory - (unsigned long)&edata);
 	setup_mmu(end_of_memory);
 }
 
diff --git a/x86/Makefile.common b/x86/Makefile.common
index 356d879a986b..88038e41af60 100644
--- a/x86/Makefile.common
+++ b/x86/Makefile.common
@@ -3,6 +3,7 @@ all: test_cases
 
 cflatobjs += lib/pci.o
+cflatobjs += lib/alloc.o
 cflatobjs += lib/x86/io.o
 cflatobjs += lib/x86/smp.o
 cflatobjs += lib/x86/vm.o
 
-- 
2.7.4
--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html