Introduce vmap_alloc() to simply get the address space.  This allows for
code sharing in the next patch.

vmap_alloc() returns NULL on failure, and vm_map_ram() checks for that
before mapping, preserving the early NULL return of the original code.
(Without the check, a failed allocation would leave addr == 0 and
vm_map_ram() would try to map the pages at address zero.)

Suggested-by: Uladzislau Rezki <urezki@xxxxxxxxx>
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/vmalloc.c | 49 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 31 insertions(+), 18 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ccaa461998f3..dcab1d3cf185 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2230,6 +2230,32 @@ void vm_unmap_ram(const void *mem, unsigned int count)
 }
 EXPORT_SYMBOL(vm_unmap_ram);
 
+/*
+ * vmap_alloc - allocate @size bytes of vmap address space for @node
+ *
+ * Returns the start of the allocated range, or NULL on failure.
+ */
+static void *vmap_alloc(size_t size, int node)
+{
+	void *mem;
+
+	if (likely(size <= (VMAP_MAX_ALLOC * PAGE_SIZE))) {
+		mem = vb_alloc(size, GFP_KERNEL);
+		if (IS_ERR(mem))
+			mem = NULL;
+	} else {
+		struct vmap_area *va;
+		va = alloc_vmap_area(size, PAGE_SIZE,
+				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
+		if (IS_ERR(va))
+			mem = NULL;
+		else
+			mem = (void *)va->va_start;
+	}
+
+	return mem;
+}
+
 /**
  * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
  * @pages: an array of pointers to the pages to be mapped
@@ -2247,24 +2273,11 @@ EXPORT_SYMBOL(vm_unmap_ram);
 void *vm_map_ram(struct page **pages, unsigned int count, int node)
 {
 	unsigned long size = (unsigned long)count << PAGE_SHIFT;
-	unsigned long addr;
-	void *mem;
-
-	if (likely(count <= VMAP_MAX_ALLOC)) {
-		mem = vb_alloc(size, GFP_KERNEL);
-		if (IS_ERR(mem))
-			return NULL;
-		addr = (unsigned long)mem;
-	} else {
-		struct vmap_area *va;
-		va = alloc_vmap_area(size, PAGE_SIZE,
-				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
-		if (IS_ERR(va))
-			return NULL;
-
-		addr = va->va_start;
-		mem = (void *)addr;
-	}
+	void *mem = vmap_alloc(size, node);
+	unsigned long addr = (unsigned long)mem;
+
+	if (!mem)
+		return NULL;
 
 	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL, pages, PAGE_SHIFT) < 0) {
-- 
2.35.1