The patch titled
     Subject: mm/vmalloc.c: refactor __vunmap() to avoid duplicated call to find_vm_area()
has been added to the -mm tree.  Its filename is
     mm-refactor-__vunmap-to-avoid-duplicated-call-to-find_vm_area.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-refactor-__vunmap-to-avoid-duplicated-call-to-find_vm_area.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-refactor-__vunmap-to-avoid-duplicated-call-to-find_vm_area.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Roman Gushchin <guroan@xxxxxxxxx>
Subject: mm/vmalloc.c: refactor __vunmap() to avoid duplicated call to find_vm_area()

__vunmap() calls find_vm_area() twice without an obvious reason: first
directly, to get the area pointer, and then indirectly, by calling
remove_vm_area(), which searches for the area again.

To remove this redundancy, let's split remove_vm_area() into
__remove_vm_area(struct vmap_area *), which performs the actual area
removal, and a remove_vm_area(const void *addr) wrapper, which can be
used everywhere it has been used before.

On my test setup, I've got a 5-10% speedup on vfree()'ing 1000000
4-page vmalloc blocks.

Perf report before:
  22.64%  cat  [kernel.vmlinux]  [k] free_pcppages_bulk
  10.30%  cat  [kernel.vmlinux]  [k] __vunmap
   9.80%  cat  [kernel.vmlinux]  [k] find_vmap_area
   8.11%  cat  [kernel.vmlinux]  [k] vunmap_page_range
   4.20%  cat  [kernel.vmlinux]  [k] __slab_free
   3.56%  cat  [kernel.vmlinux]  [k] __list_del_entry_valid
   3.46%  cat  [kernel.vmlinux]  [k] smp_call_function_many
   3.33%  cat  [kernel.vmlinux]  [k] kfree
   3.32%  cat  [kernel.vmlinux]  [k] free_unref_page

Perf report after:
  23.01%  cat  [kernel.kallsyms]  [k] free_pcppages_bulk
   9.46%  cat  [kernel.kallsyms]  [k] __vunmap
   9.15%  cat  [kernel.kallsyms]  [k] vunmap_page_range
   6.17%  cat  [kernel.kallsyms]  [k] __slab_free
   5.61%  cat  [kernel.kallsyms]  [k] kfree
   4.86%  cat  [kernel.kallsyms]  [k] bad_range
   4.67%  cat  [kernel.kallsyms]  [k] free_unref_page_commit
   4.24%  cat  [kernel.kallsyms]  [k] __list_del_entry_valid
   3.68%  cat  [kernel.kallsyms]  [k] free_unref_page
   3.65%  cat  [kernel.kallsyms]  [k] __list_add_valid
   3.19%  cat  [kernel.kallsyms]  [k] __purge_vmap_area_lazy
   3.10%  cat  [kernel.kallsyms]  [k] find_vmap_area
   3.05%  cat  [kernel.kallsyms]  [k] rcu_cblist_dequeue

Link: http://lkml.kernel.org/r/20190417194002.12369-2-guro@xxxxxx
Signed-off-by: Roman Gushchin <guro@xxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Reviewed-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/vmalloc.c |   47 +++++++++++++++++++++++++++--------------------
 1 file changed, 27 insertions(+), 20 deletions(-)

--- a/mm/vmalloc.c~mm-refactor-__vunmap-to-avoid-duplicated-call-to-find_vm_area
+++ a/mm/vmalloc.c
@@ -1471,6 +1471,24 @@ struct vm_struct *find_vm_area(const voi
 	return NULL;
 }
 
+static struct vm_struct *__remove_vm_area(struct vmap_area *va)
+{
+	struct vm_struct *vm = va->vm;
+
+	might_sleep();
+
+	spin_lock(&vmap_area_lock);
+	va->vm = NULL;
+	va->flags &= ~VM_VM_AREA;
+	va->flags |= VM_LAZY_FREE;
+	spin_unlock(&vmap_area_lock);
+
+	kasan_free_shadow(vm);
+	free_unmap_vmap_area(va);
+
+	return vm;
+}
+
 /**
  * remove_vm_area - find and remove a continuous kernel virtual area
  * @addr: base address
@@ -1483,31 +1501,20 @@ struct vm_struct *find_vm_area(const voi
  */
 struct vm_struct *remove_vm_area(const void *addr)
 {
+	struct vm_struct *vm = NULL;
 	struct vmap_area *va;
 
-	might_sleep();
-
 	va = find_vmap_area((unsigned long)addr);
-	if (va && va->flags & VM_VM_AREA) {
-		struct vm_struct *vm = va->vm;
-
-		spin_lock(&vmap_area_lock);
-		va->vm = NULL;
-		va->flags &= ~VM_VM_AREA;
-		va->flags |= VM_LAZY_FREE;
-		spin_unlock(&vmap_area_lock);
-
-		kasan_free_shadow(vm);
-		free_unmap_vmap_area(va);
+	if (va && va->flags & VM_VM_AREA)
+		vm = __remove_vm_area(va);
 
-		return vm;
-	}
-	return NULL;
+	return vm;
 }
 
 static void __vunmap(const void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
+	struct vmap_area *va;
 
 	if (!addr)
 		return;
@@ -1516,17 +1523,18 @@ static void __vunmap(const void *addr, i
 			addr))
 		return;
 
-	area = find_vm_area(addr);
-	if (unlikely(!area)) {
+	va = find_vmap_area((unsigned long)addr);
+	if (unlikely(!va || !(va->flags & VM_VM_AREA))) {
 		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
 				addr);
 		return;
 	}
+	area = va->vm;
 
 	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
 	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
-	remove_vm_area(addr);
+	__remove_vm_area(va);
 	if (deallocate_pages) {
 		int i;
 
@@ -1541,7 +1549,6 @@ static void __vunmap(const void *addr, i
 	}
 
 	kfree(area);
-	return;
 }
 
 static inline void __vfree_deferred(const void *addr)
_

Patches currently in -mm which might be from guroan@xxxxxxxxx are

mm-refactor-__vunmap-to-avoid-duplicated-call-to-find_vm_area.patch
mm-show-number-of-vmalloc-pages-in-proc-meminfo.patch
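
For readers not familiar with mm/vmalloc.c internals, the refactoring pattern
used by the patch can be shown in miniature: a "look up by address, then
operate" function is split into an inner helper that takes the already-found
object and a thin wrapper that keeps the old address-based interface, so a hot
path that has already done the lookup does not search twice. The sketch below
is a self-contained userspace analogy, not kernel code; every identifier in it
(struct thing, registry_find(), __remove_thing(), remove_thing(),
release_thing()) is a hypothetical stand-in used purely for illustration.

/*
 * Minimal userspace sketch of the refactoring pattern above: split a
 * "look up by address and remove" function into an inner helper that
 * takes an already-found object, plus a wrapper that keeps the old
 * lookup-based interface.  All identifiers are hypothetical; this is
 * not the kernel's vmalloc API.
 */
#include <stdio.h>
#include <stdlib.h>

struct thing {
	void *addr;
	int in_use;
};

#define NTHINGS 4
static struct thing registry[NTHINGS];

/* The "expensive" lookup we want to perform only once per operation. */
static struct thing *registry_find(const void *addr)
{
	for (int i = 0; i < NTHINGS; i++)
		if (registry[i].in_use && registry[i].addr == addr)
			return &registry[i];
	return NULL;
}

/* Inner helper: the caller already holds the object, no second lookup. */
static void __remove_thing(struct thing *t)
{
	t->in_use = 0;
	free(t->addr);
	t->addr = NULL;
}

/* Wrapper: preserves the old "remove by address" interface for callers. */
static int remove_thing(const void *addr)
{
	struct thing *t = registry_find(addr);

	if (!t)
		return -1;
	__remove_thing(t);
	return 0;
}

/* Analogue of __vunmap(): one lookup, then operate on the object directly. */
static void release_thing(const void *addr)
{
	struct thing *t = registry_find(addr);	/* single lookup */

	if (!t) {
		fprintf(stderr, "trying to release an unknown address\n");
		return;
	}
	printf("releasing %p\n", t->addr);
	__remove_thing(t);			/* no repeated search */
}

int main(void)
{
	void *p0 = malloc(16);
	void *p1 = malloc(16);

	registry[0] = (struct thing){ .addr = p0, .in_use = 1 };
	registry[1] = (struct thing){ .addr = p1, .in_use = 1 };

	release_thing(p0);	/* fast path: one lookup in total */
	remove_thing(p1);	/* old interface still works unchanged */
	return 0;
}

As in the patch, the wrapper keeps every existing caller of the old interface
working unchanged, while the fast path (release_thing() here, __vunmap() in
the kernel) passes along the object it already found, which is where the
measured speedup comes from.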