__vunmap() calls find_vm_area() twice without an obvious reason: first
directly to get the area pointer, and second indirectly by calling
remove_vm_area(), which searches for the area again.

To remove this redundancy, let's split remove_vm_area() into
__remove_vm_area(struct vmap_area *), which performs the actual area
removal, and the remove_vm_area(const void *addr) wrapper, which can be
used everywhere remove_vm_area() has been used before.

On my test setup, I've got a 5-10% speedup on vfree()'ing 1000000
4-page vmalloc blocks.

Perf report before:
  22.64%  cat  [kernel.vmlinux]  [k] free_pcppages_bulk
  10.30%  cat  [kernel.vmlinux]  [k] __vunmap
   9.80%  cat  [kernel.vmlinux]  [k] find_vmap_area
   8.11%  cat  [kernel.vmlinux]  [k] vunmap_page_range
   4.20%  cat  [kernel.vmlinux]  [k] __slab_free
   3.56%  cat  [kernel.vmlinux]  [k] __list_del_entry_valid
   3.46%  cat  [kernel.vmlinux]  [k] smp_call_function_many
   3.33%  cat  [kernel.vmlinux]  [k] kfree
   3.32%  cat  [kernel.vmlinux]  [k] free_unref_page

Perf report after:
  23.01%  cat  [kernel.kallsyms]  [k] free_pcppages_bulk
   9.46%  cat  [kernel.kallsyms]  [k] __vunmap
   9.15%  cat  [kernel.kallsyms]  [k] vunmap_page_range
   6.17%  cat  [kernel.kallsyms]  [k] __slab_free
   5.61%  cat  [kernel.kallsyms]  [k] kfree
   4.86%  cat  [kernel.kallsyms]  [k] bad_range
   4.67%  cat  [kernel.kallsyms]  [k] free_unref_page_commit
   4.24%  cat  [kernel.kallsyms]  [k] __list_del_entry_valid
   3.68%  cat  [kernel.kallsyms]  [k] free_unref_page
   3.65%  cat  [kernel.kallsyms]  [k] __list_add_valid
   3.19%  cat  [kernel.kallsyms]  [k] __purge_vmap_area_lazy
   3.10%  cat  [kernel.kallsyms]  [k] find_vmap_area
   3.05%  cat  [kernel.kallsyms]  [k] rcu_cblist_dequeue

Signed-off-by: Roman Gushchin <guro@xxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Reviewed-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Acked-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/vmalloc.c | 47 +++++++++++++++++++++++++++--------------------
 1 file changed, 27 insertions(+), 20 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 92b784d8088c..8ad8e8464e55 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2068,6 +2068,24 @@ struct vm_struct *find_vm_area(const void *addr)
         return NULL;
 }
 
+static struct vm_struct *__remove_vm_area(struct vmap_area *va)
+{
+        struct vm_struct *vm = va->vm;
+
+        might_sleep();
+
+        spin_lock(&vmap_area_lock);
+        va->vm = NULL;
+        va->flags &= ~VM_VM_AREA;
+        va->flags |= VM_LAZY_FREE;
+        spin_unlock(&vmap_area_lock);
+
+        kasan_free_shadow(vm);
+        free_unmap_vmap_area(va);
+
+        return vm;
+}
+
 /**
  * remove_vm_area - find and remove a continuous kernel virtual area
  * @addr:           base address
@@ -2080,31 +2098,20 @@ struct vm_struct *find_vm_area(const void *addr)
  */
 struct vm_struct *remove_vm_area(const void *addr)
 {
+        struct vm_struct *vm = NULL;
         struct vmap_area *va;
 
-        might_sleep();
-
         va = find_vmap_area((unsigned long)addr);
-        if (va && va->flags & VM_VM_AREA) {
-                struct vm_struct *vm = va->vm;
-
-                spin_lock(&vmap_area_lock);
-                va->vm = NULL;
-                va->flags &= ~VM_VM_AREA;
-                va->flags |= VM_LAZY_FREE;
-                spin_unlock(&vmap_area_lock);
-
-                kasan_free_shadow(vm);
-                free_unmap_vmap_area(va);
+        if (va && va->flags & VM_VM_AREA)
+                vm = __remove_vm_area(va);
 
-                return vm;
-        }
-        return NULL;
+        return vm;
 }
 
 static void __vunmap(const void *addr, int deallocate_pages)
 {
         struct vm_struct *area;
+        struct vmap_area *va;
 
         if (!addr)
                 return;
@@ -2113,17 +2120,18 @@ static void __vunmap(const void *addr, int deallocate_pages)
                         addr))
                 return;
 
-        area = find_vm_area(addr);
-        if (unlikely(!area)) {
+        va = find_vmap_area((unsigned long)addr);
+        if (unlikely(!va || !(va->flags & VM_VM_AREA))) {
                 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                 addr);
                 return;
         }
+        area = va->vm;
 
         debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
         debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
-        remove_vm_area(addr);
+        __remove_vm_area(va);
         if (deallocate_pages) {
                 int i;
 
@@ -2138,7 +2146,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
         }
 
         kfree(area);
-        return;
 }
 
 static inline void __vfree_deferred(const void *addr)
-- 
2.20.1
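
[Editor's note: below is a minimal userspace sketch of the same
refactoring pattern, not part of the patch and not kernel code. A
find-and-remove wrapper is split into a lookup plus an inner helper
that takes the already-found object, so a caller that needs the object
anyway performs only one search. All names here (struct entry,
find_entry(), __remove_entry(), remove_entry(), release_addr()) are
hypothetical stand-ins for the vmap machinery.]

#include <stdio.h>
#include <stdlib.h>

/* Toy doubly-linked registry standing in for the vmap area structures. */
struct entry {
        void *addr;
        struct entry *prev, *next;
};

static struct entry *registry;

static void add_entry(void *addr)
{
        struct entry *e = calloc(1, sizeof(*e));

        if (!e)
                abort();
        e->addr = addr;
        e->next = registry;
        if (registry)
                registry->prev = e;
        registry = e;
}

/* Single linear search, playing the role of find_vmap_area(). */
static struct entry *find_entry(const void *addr)
{
        struct entry *e;

        for (e = registry; e; e = e->next)
                if (e->addr == addr)
                        return e;
        return NULL;
}

/*
 * Inner helper, playing the role of __remove_vm_area(): it takes the
 * entry itself, so no second search is needed to unlink it.
 */
static void __remove_entry(struct entry *e)
{
        if (e->prev)
                e->prev->next = e->next;
        else
                registry = e->next;
        if (e->next)
                e->next->prev = e->prev;
        free(e);
}

/* Wrapper, playing the role of remove_vm_area(): search, then remove. */
static void remove_entry(const void *addr)
{
        struct entry *e = find_entry(addr);

        if (e)
                __remove_entry(e);
}

/*
 * Plays the role of the patched __vunmap(): one find_entry() call,
 * instead of find_entry() followed by remove_entry(), which would
 * search the registry a second time.
 */
static void release_addr(const void *addr)
{
        struct entry *e = find_entry(addr);

        if (!e) {
                fprintf(stderr, "no entry for %p\n", addr);
                return;
        }
        /* ...per-entry cleanup would happen here... */
        __remove_entry(e);
}

int main(void)
{
        static int a, b;

        add_entry(&a);
        add_entry(&b);

        remove_entry(&b);       /* external callers keep the old interface */
        release_addr(&a);       /* combined path: a single search */
        release_addr(&a);       /* warns: entry already gone */
        return 0;
}

The design point is the same as in the patch: paths that already hold
the object pointer call the inner helper directly, while external
callers keep the unchanged remove-by-address interface.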