Signed-off-by: Claudio Imbrenda <imbrenda@xxxxxxxxxxxxx>
---
 lib/vmalloc.c | 105 +++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 95 insertions(+), 10 deletions(-)

diff --git a/lib/vmalloc.c b/lib/vmalloc.c
index e0c7b6b..aca0876 100644
--- a/lib/vmalloc.c
+++ b/lib/vmalloc.c
@@ -15,6 +15,13 @@
 #include <bitops.h>
 #include "vmalloc.h"
 
+#define VM_MAGIC 0x7E57C0DE
+
+struct metadata {
+	unsigned long npages;
+	unsigned long magic;
+};
+
 static struct spinlock lock;
 static void *vfree_top = 0;
 static void *page_root;
@@ -25,8 +32,14 @@ static void *page_root;
  *
  * nr is the number of pages to allocate
  * alignment_pages is the alignment of the allocation *in pages*
+ * metadata indicates whether an extra (unaligned) page needs to be allocated
+ * right before the main (aligned) allocation.
+ *
+ * The return value points to the first allocated virtual page, which will
+ * be the (potentially unaligned) metadata page if the metadata flag is
+ * specified.
  */
-void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
+static void *do_alloc_vpages(ulong nr, unsigned int align_order, bool metadata)
 {
 	uintptr_t ptr;
 
@@ -34,6 +47,8 @@ void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
 	ptr = (uintptr_t)vfree_top;
 	ptr -= PAGE_SIZE * nr;
 	ptr &= GENMASK_ULL(63, PAGE_SHIFT + align_order);
+	if (metadata)
+		ptr -= PAGE_SIZE;
 	vfree_top = (void *)ptr;
 	spin_unlock(&lock);
 
@@ -41,6 +56,11 @@ void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
 	return (void *)ptr;
 }
 
+void *alloc_vpages_aligned(ulong nr, unsigned int align_order)
+{
+	return do_alloc_vpages(nr, align_order, false);
+}
+
 void *alloc_vpages(ulong nr)
 {
 	return alloc_vpages_aligned(nr, 0);
@@ -69,35 +89,100 @@ void *vmap(phys_addr_t phys, size_t size)
 	return mem;
 }
 
+/*
+ * Allocate one page for an object with the specified alignment.
+ * The resulting pointer will be aligned to the required alignment, but
+ * intentionally not page-aligned.
+ */
+static void *vm_alloc_one_page(size_t alignment)
+{
+	void *p;
+
+	assert(alignment >= sizeof(uintptr_t));
+	assert(alignment < PAGE_SIZE);
+	p = alloc_vpage();
+	install_page(page_root, virt_to_phys(alloc_page()), p);
+	/* write the magic value at the beginning of the page */
+	*(uintptr_t *)p = VM_MAGIC;
+	return (void *)((uintptr_t)p + alignment);
+}
+
+static struct metadata *get_metadata(void *p)
+{
+	struct metadata *m = p;
+
+	return m - 1;
+}
+
 /*
  * Allocate virtual memory, with the specified minimum alignment.
+ * If the allocation fits in one page, only one page is allocated. Otherwise
+ * enough pages are allocated for the object, plus one to keep metadata
+ * information about the allocation.
  */
 static void *vm_memalign(size_t alignment, size_t size)
 {
+	struct metadata *m;
 	phys_addr_t pa;
-	void *mem, *p;
+	uintptr_t p;
+	void *mem;
+	size_t i;
 
+	if (!size)
+		return NULL;
 	assert(is_power_of_2(alignment));
+	if (alignment < sizeof(uintptr_t))
+		alignment = sizeof(uintptr_t);
+	/* the allocation fits in one page, allocate only one page */
+	if (alignment + size <= PAGE_SIZE)
+		return vm_alloc_one_page(alignment);
 
 	size = PAGE_ALIGN(size) / PAGE_SIZE;
 	alignment = get_order(PAGE_ALIGN(alignment) / PAGE_SIZE);
-	mem = p = alloc_vpages_aligned(size, alignment);
-	while (size--) {
+	mem = do_alloc_vpages(size, alignment, true);
+	p = (uintptr_t)mem;
+	/* skip the metadata page */
+	mem = (void *)(p + PAGE_SIZE);
+	/*
+	 * time to actually allocate the physical pages to back our virtual
+	 * allocation; note that we need to allocate one extra page (for the
+	 * metadata), hence the <=
+	 */
+	for (i = 0; i <= size; i++, p += PAGE_SIZE) {
 		pa = virt_to_phys(alloc_page());
 		assert(pa);
-		install_page(page_root, pa, p);
-		p += PAGE_SIZE;
+		install_page(page_root, pa, (void *)p);
 	}
+	m = get_metadata(mem);
+	m->npages = size;
+	m->magic = VM_MAGIC;
 	return mem;
 }
 
 static void vm_free(void *mem, size_t size)
 {
-	while (size) {
-		free_page(phys_to_virt(virt_to_pte_phys(page_root, mem)));
-		mem += PAGE_SIZE;
-		size -= PAGE_SIZE;
+	struct metadata *m;
+	uintptr_t ptr, end;
+
+	/* the pointer is not page-aligned, it was a single-page allocation */
+	if (!IS_ALIGNED((uintptr_t)mem, PAGE_SIZE)) {
+		ptr = virt_to_pte_phys(page_root, mem) & PAGE_MASK;
+		assert(*(uintptr_t *)ptr == VM_MAGIC);
+		free_page(phys_to_virt(ptr));
+		return;
 	}
+
+	/* the pointer is page-aligned, it was a multi-page allocation */
+	m = get_metadata(mem);
+	assert(m->magic == VM_MAGIC);
+	assert(m->npages > 0);
+	/* free all the pages including the metadata page */
+	ptr = (uintptr_t)mem - PAGE_SIZE;
+	end = ptr + m->npages * PAGE_SIZE;
+	for ( ; ptr < end; ptr += PAGE_SIZE)
+		free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
+	/* free the last one separately to avoid overflow issues */
+	free_page(phys_to_virt(virt_to_pte_phys(page_root, (void *)ptr)));
 }
 
 static struct alloc_ops vmalloc_ops = {
-- 
2.26.2
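For illustration only (not part of the patch): a minimal sketch of how the two
allocation paths above look from a test's point of view. It assumes setup_vm()
has installed vmalloc_ops as the active alloc_ops and goes through the public
memalign()/free() wrappers from lib/alloc.h; the function name and the sizes
chosen are made up for the example.

#include <libcflat.h>
#include <alloc.h>
#include <vmalloc.h>
#include <asm/page.h>

/* hypothetical smoke test exercising both vm_memalign() paths */
static void vmalloc_metadata_demo(void)
{
	void *small, *large;

	setup_vm();

	/*
	 * alignment + size fit in one page, so this goes through
	 * vm_alloc_one_page(): aligned to 32 bytes, but not page-aligned
	 */
	small = memalign(32, 64);
	assert(((uintptr_t)small & (PAGE_SIZE - 1)) != 0);

	/*
	 * does not fit in one page, so this is page-aligned and the
	 * metadata page (npages + VM_MAGIC) sits right below it
	 */
	large = memalign(PAGE_SIZE, 3 * PAGE_SIZE);
	assert(((uintptr_t)large & (PAGE_SIZE - 1)) == 0);

	/* both go back through vm_free(), which picks the right path */
	free(small);
	free(large);
}

The property being exercised is that the alignment of the pointer alone tells
vm_free() which bookkeeping to look for (in-page magic vs. metadata page),
without relying on the size parameter.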