The patch titled
     Subject: mm/vmalloc.c: optimize to reduce arguments of alloc_vmap_area()
has been added to the -mm mm-unstable branch.  Its filename is
     mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area.patch

This patch will later appear in the mm-unstable branch at
     git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Baoquan He <bhe@xxxxxxxxxx>
Subject: mm/vmalloc.c: optimize to reduce arguments of alloc_vmap_area()
Date: Sat, 9 Mar 2024 12:44:54 +0800

When alloc_vmap_area() is called from __get_vm_area_node(), open code the
field assignments of 'struct vm_struct *vm' and move the vm->flags and
vm->caller assignments into __get_vm_area_node().  The passed-in arguments
'flags' and 'caller' can then be removed, which reduces the overloaded
argument list of alloc_vmap_area().

Link: https://lkml.kernel.org/r/20240309044454.648888-1-bhe@xxxxxxxxxx
Signed-off-by: Baoquan He <bhe@xxxxxxxxxx>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/vmalloc.c |   20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

--- a/mm/vmalloc.c~mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area
+++ a/mm/vmalloc.c
@@ -1931,8 +1931,7 @@ static struct vmap_area *alloc_vmap_area
 				unsigned long align,
 				unsigned long vstart, unsigned long vend,
 				int node, gfp_t gfp_mask,
-				unsigned long va_flags, struct vm_struct *vm,
-				unsigned long flags, const void *caller)
+				unsigned long va_flags, struct vm_struct *vm)
 {
 	struct vmap_node *vn;
 	struct vmap_area *va;
@@ -1995,8 +1994,11 @@ retry:
 	va->vm = NULL;
 	va->flags = (va_flags | vn_id);
 
-	if (vm)
-		setup_vmalloc_vm(vm, va, flags, caller);
+	if (vm) {
+		vm->addr = (void *)va->va_start;
+		vm->size = va->va_end - va->va_start;
+		va->vm = vm;
+	}
 
 	vn = addr_to_node(va->va_start);
 
@@ -2572,8 +2574,7 @@ static void *new_vmap_block(unsigned int
 	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 					VMALLOC_START, VMALLOC_END,
 					node, gfp_mask,
-					VMAP_RAM|VMAP_BLOCK, NULL,
-					0, NULL);
+					VMAP_RAM|VMAP_BLOCK, NULL);
 	if (IS_ERR(va)) {
 		kfree(vb);
 		return ERR_CAST(va);
@@ -2931,7 +2932,7 @@ void *vm_map_ram(struct page **pages, un
 	va = alloc_vmap_area(size, PAGE_SIZE,
 				VMALLOC_START, VMALLOC_END,
 				node, GFP_KERNEL, VMAP_RAM,
-				NULL, 0, NULL);
+				NULL);
 	if (IS_ERR(va))
 		return NULL;
 
@@ -3070,7 +3071,10 @@ static struct vm_struct *__get_vm_area_n
 	if (!(flags & VM_NO_GUARD))
 		size += PAGE_SIZE;
 
-	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area, flags, caller);
+	area->flags = flags;
+	area->caller = caller;
+
+	va = alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
 	if (IS_ERR(va)) {
 		kfree(area);
 		return NULL;
_
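For context, setup_vmalloc_vm(), which the second hunk above open codes, is
assumed here to have roughly the shape sketched below; the sketch is only
meant to show which assignments stay in alloc_vmap_area() (addr, size,
va->vm) and which ones (flags, caller) now move into __get_vm_area_node():

/*
 * Sketch only, not part of the patch: assumed shape of setup_vmalloc_vm()
 * before it is open coded by the hunk above.  The addr/size/va->vm
 * assignments remain in alloc_vmap_area(); the flags/caller assignments
 * move to the __get_vm_area_node() caller.
 */
static inline void setup_vmalloc_vm(struct vm_struct *vm,
	struct vmap_area *va, unsigned long flags, const void *caller)
{
	vm->flags = flags;
	vm->addr = (void *)va->va_start;
	vm->size = va->va_end - va->va_start;
	vm->caller = caller;
	va->vm = vm;
}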
Patches currently in -mm which might be from bhe@xxxxxxxxxx are

mm-mm_initc-remove-the-useless-dma_reserve.patch
x86-remove-memblock_find_dma_reserve.patch
mm-mm_initc-add-new-function-calc_nr_kernel_pages.patch
mm-mm_initc-remove-meaningless-calculation-of-zone-managed_pages-in-free_area_init_core.patch
mm-mm_initc-remove-unneeded-calc_memmap_size.patch
mm-mm_initc-remove-arch_reserved_kernel_pages.patch
mm-vmallocc-optimize-to-reduce-arguments-of-alloc_vmap_area.patch