The patch titled
     Subject: mm/vmalloc: do not keep unpurged areas in the busy tree
has been removed from the -mm tree.  Its filename was
     mm-vmalloc-do-not-keep-unpurged-areas-in-the-busy-tree.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
From: "Uladzislau Rezki (Sony)" <urezki@xxxxxxxxx>
Subject: mm/vmalloc: do not keep unpurged areas in the busy tree

The busy tree can be quite big: even though an area is freed or unmapped,
it still stays there until the "purge" logic removes it.

1) Optimize and reduce the size of the "busy" tree by removing a node from
it right away as soon as the user triggers the free path.  It is possible
to do so, because the allocation is done using another augmented tree.

The vmalloc test driver shows the difference; for example, the
"fix_size_alloc_test" is ~11% better compared with the default
configuration:

sudo ./test_vmalloc.sh performance

<default>
Summary: fix_size_alloc_test loops: 1000000 avg: 993985 usec
Summary: full_fit_alloc_test loops: 1000000 avg: 973554 usec
Summary: long_busy_list_alloc_test loops: 1000000 avg: 12617652 usec
<default>

<this patch>
Summary: fix_size_alloc_test loops: 1000000 avg: 882263 usec
Summary: full_fit_alloc_test loops: 1000000 avg: 973407 usec
Summary: long_busy_list_alloc_test loops: 1000000 avg: 12593929 usec
<this patch>

2) Since the busy tree now contains allocated areas only and does not
interfere with lazily-freed nodes, introduce the new function
show_purge_info() that dumps "unpurged" areas, which are exposed through
"/proc/vmallocinfo".

3) Eliminate the VM_LAZY_FREE flag.

Link: http://lkml.kernel.org/r/20190716152656.12255-2-lpf.vector@xxxxxxxxx
Signed-off-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>
Signed-off-by: Pengfei Li <lpf.vector@xxxxxxxxx>
Cc: Roman Gushchin <guro@xxxxxx>
Cc: Uladzislau Rezki <urezki@xxxxxxxxx>
Cc: Hillf Danton <hdanton@xxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@xxxxxxxxxxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/vmalloc.c |   52 ++++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 44 insertions(+), 8 deletions(-)

--- a/mm/vmalloc.c~mm-vmalloc-do-not-keep-unpurged-areas-in-the-busy-tree
+++ a/mm/vmalloc.c
@@ -329,7 +329,6 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 
-#define VM_LAZY_FREE    0x02
 #define VM_VM_AREA      0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
@@ -1282,7 +1281,14 @@ static bool __purge_vmap_area_lazy(unsig
         llist_for_each_entry_safe(va, n_va, valist, purge_list) {
                 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
 
-                __free_vmap_area(va);
+                /*
+                 * Finally insert or merge lazily-freed area. It is
+                 * detached and there is no need to "unlink" it from
+                 * anything.
+                 */
+                merge_or_add_vmap_area(va,
+                        &free_vmap_area_root, &free_vmap_area_list);
+
                 atomic_long_sub(nr, &vmap_lazy_nr);
 
                 if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
@@ -1324,6 +1330,10 @@ static void free_vmap_area_noflush(struc
 {
         unsigned long nr_lazy;
 
+        spin_lock(&vmap_area_lock);
+        unlink_va(va, &vmap_area_root);
+        spin_unlock(&vmap_area_lock);
+
         nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
                                 PAGE_SHIFT, &vmap_lazy_nr);
@@ -2143,14 +2153,13 @@ struct vm_struct *remove_vm_area(const v
         might_sleep();
 
-        va = find_vmap_area((unsigned long)addr);
+        spin_lock(&vmap_area_lock);
+        va = __find_vmap_area((unsigned long)addr);
         if (va && va->flags & VM_VM_AREA) {
                 struct vm_struct *vm = va->vm;
 
-                spin_lock(&vmap_area_lock);
                 va->vm = NULL;
                 va->flags &= ~VM_VM_AREA;
-                va->flags |= VM_LAZY_FREE;
                 spin_unlock(&vmap_area_lock);
 
                 kasan_free_shadow(vm);
@@ -2158,6 +2167,8 @@ struct vm_struct *remove_vm_area(const v
 
                 return vm;
         }
+
+        spin_unlock(&vmap_area_lock);
         return NULL;
 }
@@ -3450,6 +3461,22 @@ static void show_numa_info(struct seq_fi
         }
 }
 
+static void show_purge_info(struct seq_file *m)
+{
+        struct llist_node *head;
+        struct vmap_area *va;
+
+        head = READ_ONCE(vmap_purge_list.first);
+        if (head == NULL)
+                return;
+
+        llist_for_each_entry(va, head, purge_list) {
+                seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
+                        (void *)va->va_start, (void *)va->va_end,
+                        va->va_end - va->va_start);
+        }
+}
+
 static int s_show(struct seq_file *m, void *p)
 {
         struct vmap_area *va;
@@ -3462,10 +3489,9 @@ static int s_show(struct seq_file *m, vo
          * behalf of vmap area is being tear down or vm_map_ram allocation.
          */
         if (!(va->flags & VM_VM_AREA)) {
-                seq_printf(m, "0x%pK-0x%pK %7ld %s\n",
+                seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
                         (void *)va->va_start, (void *)va->va_end,
-                        va->va_end - va->va_start,
-                        va->flags & VM_LAZY_FREE ? "unpurged vm_area" : "vm_map_ram");
+                        va->va_end - va->va_start);
 
                 return 0;
         }
@@ -3504,6 +3530,16 @@ static int s_show(struct seq_file *m, vo
         show_numa_info(m, v);
         seq_putc(m, '\n');
+
+        /*
+         * As a final step, dump "unpurged" areas. Note,
+         * that entire "/proc/vmallocinfo" output will not
+         * be address sorted, because the purge list is not
+         * sorted.
+         */
+        if (list_is_last(&va->list, &vmap_area_list))
+                show_purge_info(m);
+
         return 0;
 }
_

Patches currently in -mm which might be from urezki@xxxxxxxxx are
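
As a quick way to look at the new show_purge_info() output, here is a
minimal userspace sketch (not part of the patch; the parsing assumes the
"0x%pK-0x%pK %7ld unpurged vm_area" line format added above).  It counts
the unpurged areas reported via /proc/vmallocinfo and sums their sizes:

/*
 * Minimal sketch, not part of the patch: count the "unpurged vm_area"
 * entries that show_purge_info() appends to /proc/vmallocinfo and
 * report their total size. Assumes the line format added by this
 * patch: "0x%pK-0x%pK %7ld unpurged vm_area".
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/proc/vmallocinfo", "r");
        char line[512];
        unsigned long count = 0, total = 0, size;

        if (!f) {
                perror("/proc/vmallocinfo");
                return 1;
        }

        while (fgets(line, sizeof(line), f)) {
                if (!strstr(line, "unpurged vm_area"))
                        continue;

                /* First field is the address range, second is the size. */
                if (sscanf(line, "%*s %lu", &size) == 1) {
                        count++;
                        total += size;
                }
        }
        fclose(f);

        printf("unpurged areas: %lu, total: %lu bytes\n", count, total);
        return 0;
}

Run as root after heavy vmalloc()/vfree() activity; a non-zero count only
means lazily-freed areas are still waiting for the purge pass.  The entries
show up at the end of the file because, as the patch notes, the purge list
is not address sorted.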