The patch titled
     Subject: mm-vmap-keep-track-of-free-blocks-for-vmap-allocation-v4
has been removed from the -mm tree.  Its filename was
     mm-vmap-keep-track-of-free-blocks-for-vmap-allocation-v4.patch

This patch was dropped because it was folded into mm-vmap-keep-track-of-free-blocks-for-vmap-allocation.patch

------------------------------------------------------
From: "Uladzislau Rezki (Sony)" <urezki@xxxxxxxxx>
Subject: mm-vmap-keep-track-of-free-blocks-for-vmap-allocation-v4

- updated the commit message of the [1] patch;
- simplified the compute_subtree_max_size() function by using the max3() macro;
- added more explanation to the find_va_links() function;
- reworked the function names;
- replaced the u8 type with enum fit_type;
- when initializing the vmap free space, trigger WARN_ON_ONCE() if kmem_cache* fails;
- reworked the pvm_determine_end_from_reverse() function a bit;
- inverted the "if" condition in __get_va_next_sibling();
- removed an intermediate function in the [2] patch.

Link: http://lkml.kernel.org/r/20190406183508.25273-2-urezki@xxxxxxxxx
Signed-off-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxx>
Cc: Joel Fernandes <joelaf@xxxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxx>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@xxxxxxxxxxxxxx>
Cc: Roman Gushchin <guro@xxxxxx>
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Thomas Garnier <thgarnie@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/vmalloc.c |  202 ++++++++++++++++++++++++-------------------------
 1 file changed, 102 insertions(+), 100 deletions(-)

--- a/mm/vmalloc.c~mm-vmap-keep-track-of-free-blocks-for-vmap-allocation-v4
+++ a/mm/vmalloc.c
@@ -322,7 +322,9 @@ unsigned long vmalloc_to_pfn(const void
 }
 EXPORT_SYMBOL(vmalloc_to_pfn);
 
+
 /*** Global kva allocator ***/
+
 
 #define VM_LAZY_FREE    0x02
 #define VM_VM_AREA      0x04
@@ -360,7 +362,7 @@ static LIST_HEAD(free_vmap_area_list);
 static struct rb_root free_vmap_area_root = RB_ROOT;
 
 static __always_inline unsigned long
-__va_size(struct vmap_area *va)
+va_size(struct vmap_area *va)
 {
        return (va->va_end - va->va_start);
 }
@@ -380,18 +382,9 @@ get_subtree_max_size(struct rb_node *nod
 static __always_inline unsigned long
 compute_subtree_max_size(struct vmap_area *va)
 {
-       unsigned long max_size = __va_size(va);
-       unsigned long child_max_size;
-
-       child_max_size = get_subtree_max_size(va->rb_node.rb_right);
-       if (child_max_size > max_size)
-               max_size = child_max_size;
-
-       child_max_size = get_subtree_max_size(va->rb_node.rb_left);
-       if (child_max_size > max_size)
-               max_size = child_max_size;
-
-       return max_size;
+       return max3(va_size(va),
+               get_subtree_max_size(va->rb_node.rb_left),
+               get_subtree_max_size(va->rb_node.rb_right));
 }
 
 RB_DECLARE_CALLBACKS(static, free_vmap_area_rb_augment_cb,
@@ -426,7 +419,7 @@ static struct vmap_area *__find_vmap_are
  * and its left or right link for further processing.
  */
 static __always_inline struct rb_node **
-__find_va_links(struct vmap_area *va,
+find_va_links(struct vmap_area *va,
        struct rb_root *root, struct rb_node *from,
        struct rb_node **parent)
 {
@@ -444,7 +437,9 @@ __find_va_links(struct vmap_area *va,
        }
 
        /*
-        * Go to the bottom of the tree.
+        * Go to the bottom of the tree. When we hit the last point
+        * we end up with parent rb_node and correct direction, i name
+        * it link, where the new va->rb_node will be attached to.
         */
        do {
                tmp_va = rb_entry(*link, struct vmap_area, rb_node);
@@ -469,26 +464,25 @@ __find_va_links(struct vmap_area *va,
 }
 
 static __always_inline struct list_head *
-__get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
+get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
 {
        struct list_head *list;
 
-       if (likely(parent)) {
-               list = &rb_entry(parent, struct vmap_area, rb_node)->list;
-               return (&parent->rb_right == link ? list->next:list);
-       }
+       if (unlikely(!parent))
+               /*
+                * The red-black tree where we try to find VA neighbors
+                * before merging or inserting is empty, i.e. it means
+                * there is no free vmap space. Normally it does not
+                * happen but we handle this case anyway.
+                */
+               return NULL;
 
-       /*
-        * The red-black tree where we try to find VA neighbors
-        * before merging or inserting is empty, i.e. it means
-        * there is no free vmap space. Normally it does not
-        * happen but we handle this case anyway.
-        */
-       return NULL;
+       list = &rb_entry(parent, struct vmap_area, rb_node)->list;
+       return (&parent->rb_right == link ? list->next : list);
 }
 
 static __always_inline void
-__link_va(struct vmap_area *va, struct rb_root *root,
+link_va(struct vmap_area *va, struct rb_root *root,
        struct rb_node *parent, struct rb_node **link, struct list_head *head)
 {
        /*
@@ -527,7 +521,7 @@ __link_va(struct vmap_area *va, struct r
 }
 
 static __always_inline void
-__unlink_va(struct vmap_area *va, struct rb_root *root)
+unlink_va(struct vmap_area *va, struct rb_root *root)
 {
        /*
         * During merging a VA node can be empty, therefore
@@ -573,7 +567,7 @@ __unlink_va(struct vmap_area *va, struct
  * node becomes 4--6.
  */
 static __always_inline void
-__augment_tree_propagate_from(struct vmap_area *va)
+augment_tree_propagate_from(struct vmap_area *va)
 {
        struct rb_node *node = &va->rb_node;
        unsigned long new_va_sub_max_size;
@@ -597,18 +591,18 @@ __augment_tree_propagate_from(struct vma
 }
 
 static void
-__insert_vmap_area(struct vmap_area *va,
+insert_vmap_area(struct vmap_area *va,
        struct rb_root *root, struct list_head *head)
 {
        struct rb_node **link;
        struct rb_node *parent;
 
-       link = __find_va_links(va, root, NULL, &parent);
-       __link_va(va, root, parent, link, head);
+       link = find_va_links(va, root, NULL, &parent);
+       link_va(va, root, parent, link, head);
 }
 
 static void
-__insert_vmap_area_augment(struct vmap_area *va,
+insert_vmap_area_augment(struct vmap_area *va,
        struct rb_node *from, struct rb_root *root,
        struct list_head *head)
 {
@@ -616,12 +610,12 @@ __insert_vmap_area_augment(struct vmap_a
        struct rb_node *parent;
 
        if (from)
-               link = __find_va_links(va, NULL, from, &parent);
+               link = find_va_links(va, NULL, from, &parent);
        else
-               link = __find_va_links(va, root, NULL, &parent);
+               link = find_va_links(va, root, NULL, &parent);
 
-       __link_va(va, root, parent, link, head);
-       __augment_tree_propagate_from(va);
+       link_va(va, root, parent, link, head);
+       augment_tree_propagate_from(va);
 }
 
 /*
@@ -631,7 +625,7 @@ __insert_vmap_area_augment(struct vmap_a
  * freed.
  */
 static __always_inline void
-__merge_or_add_vmap_area(struct vmap_area *va,
+merge_or_add_vmap_area(struct vmap_area *va,
        struct rb_root *root, struct list_head *head)
 {
        struct vmap_area *sibling;
@@ -644,12 +638,12 @@ __merge_or_add_vmap_are
         * Find a place in the tree where VA potentially will be
         * inserted, unless it is merged with its sibling/siblings.
         */
-       link = __find_va_links(va, root, NULL, &parent);
+       link = find_va_links(va, root, NULL, &parent);
 
        /*
        * Get next node of VA to check if merging can be done.
        */
-       next = __get_va_next_sibling(parent, link);
+       next = get_va_next_sibling(parent, link);
        if (unlikely(next == NULL))
                goto insert;
 
@@ -666,10 +660,10 @@ __merge_or_add_vmap_are
                sibling->va_start = va->va_start;
 
                /* Check and update the tree if needed. */
-               __augment_tree_propagate_from(sibling);
+               augment_tree_propagate_from(sibling);
 
                /* Remove this VA, it has been merged. */
-               __unlink_va(va, root);
+               unlink_va(va, root);
 
                /* Free vmap_area object. */
                kmem_cache_free(vmap_area_cachep, va);
@@ -693,10 +687,10 @@ __merge_or_add_vmap_are
                sibling->va_end = va->va_end;
 
                /* Check and update the tree if needed. */
-               __augment_tree_propagate_from(sibling);
+               augment_tree_propagate_from(sibling);
 
                /* Remove this VA, it has been merged. */
-               __unlink_va(va, root);
+               unlink_va(va, root);
 
                /* Free vmap_area object. */
                kmem_cache_free(vmap_area_cachep, va);
@@ -707,8 +701,8 @@ __merge_or_add_vmap_are
 
 insert:
        if (!merged) {
-               __link_va(va, root, parent, link, head);
-               __augment_tree_propagate_from(va);
+               link_va(va, root, parent, link, head);
+               augment_tree_propagate_from(va);
        }
 }
 
@@ -737,7 +731,7 @@ is_within_this_va(struct vmap_area *va,
  * parameters.
  */
 static __always_inline struct vmap_area *
-__find_vmap_lowest_match(unsigned long size,
+find_vmap_lowest_match(unsigned long size,
        unsigned long align, unsigned long vstart)
 {
        struct vmap_area *va;
@@ -792,7 +786,7 @@ __find_vmap_lowest_match(unsigned long s
        return NULL;
 }
 
-enum alloc_fit_type {
+enum fit_type {
        NOTHING_FIT = 0,
        FL_FIT_TYPE = 1,        /* full fit */
        LE_FIT_TYPE = 2,        /* left edge fit */
@@ -800,11 +794,11 @@ enum alloc_fit_type {
        NE_FIT_TYPE = 4         /* no edge fit */
 };
 
-static __always_inline u8
-__classify_va_fit_type(struct vmap_area *va,
+static __always_inline enum fit_type
+classify_va_fit_type(struct vmap_area *va,
        unsigned long nva_start_addr, unsigned long size)
 {
-       u8 fit_type;
+       enum fit_type type;
 
        /* Check if it is within VA. */
        if (nva_start_addr < va->va_start ||
@@ -814,25 +808,26 @@ __classify_va_fit_type(struct vmap_area
        /* Now classify. */
        if (va->va_start == nva_start_addr) {
                if (va->va_end == nva_start_addr + size)
-                       fit_type = FL_FIT_TYPE;
+                       type = FL_FIT_TYPE;
                else
-                       fit_type = LE_FIT_TYPE;
+                       type = LE_FIT_TYPE;
        } else if (va->va_end == nva_start_addr + size) {
-               fit_type = RE_FIT_TYPE;
+               type = RE_FIT_TYPE;
        } else {
-               fit_type = NE_FIT_TYPE;
+               type = NE_FIT_TYPE;
        }
 
-       return fit_type;
+       return type;
 }
 
 static __always_inline int
-__adjust_va_to_fit_type(struct vmap_area *va,
-       unsigned long nva_start_addr, unsigned long size, u8 fit_type)
+adjust_va_to_fit_type(struct vmap_area *va,
+       unsigned long nva_start_addr, unsigned long size,
+       enum fit_type type)
 {
        struct vmap_area *lva;
 
-       if (fit_type == FL_FIT_TYPE) {
+       if (type == FL_FIT_TYPE) {
                /*
                 * No need to split VA, it fully fits.
                 *
@@ -840,9 +835,9 @@ __adjust_va_to_fit_type(struct vmap_area
                 * V      NVA      V
                 * |---------------|
                 */
-               __unlink_va(va, &free_vmap_area_root);
+               unlink_va(va, &free_vmap_area_root);
                kmem_cache_free(vmap_area_cachep, va);
-       } else if (fit_type == LE_FIT_TYPE) {
+       } else if (type == LE_FIT_TYPE) {
                /*
                 * Split left edge of fit VA.
                 *
@@ -851,7 +846,7 @@ __adjust_va_to_fit_type(struct vmap_area
                 * |-------|-------|
                 */
                va->va_start += size;
-       } else if (fit_type == RE_FIT_TYPE) {
+       } else if (type == RE_FIT_TYPE) {
                /*
                 * Split right edge of fit VA.
                 *
@@ -860,7 +855,7 @@ __adjust_va_to_fit_type(struct vmap_area
                 * |-------|-------|
                 */
                va->va_end = nva_start_addr;
-       } else if (fit_type == NE_FIT_TYPE) {
+       } else if (type == NE_FIT_TYPE) {
                /*
                 * Split no edge of fit VA.
                 *
@@ -886,11 +881,11 @@ __adjust_va_to_fit_type(struct vmap_area
                return -1;
        }
 
-       if (fit_type != FL_FIT_TYPE) {
-               __augment_tree_propagate_from(va);
+       if (type != FL_FIT_TYPE) {
+               augment_tree_propagate_from(va);
 
-               if (fit_type == NE_FIT_TYPE)
-                       __insert_vmap_area_augment(lva, &va->rb_node,
+               if (type == NE_FIT_TYPE)
+                       insert_vmap_area_augment(lva, &va->rb_node,
                                &free_vmap_area_root, &free_vmap_area_list);
        }
 
@@ -907,10 +902,10 @@ __alloc_vmap_area(unsigned long size, un
 {
        unsigned long nva_start_addr;
        struct vmap_area *va;
-       u8 fit_type;
+       enum fit_type type;
        int ret;
 
-       va = __find_vmap_lowest_match(size, align, vstart);
+       va = find_vmap_lowest_match(size, align, vstart);
        if (unlikely(!va))
                return vend;
 
@@ -924,12 +919,12 @@ __alloc_vmap_area(unsigned long size, un
                return vend;
 
        /* Classify what we have found. */
-       fit_type = __classify_va_fit_type(va, nva_start_addr, size);
-       if (WARN_ON_ONCE(fit_type == NOTHING_FIT))
+       type = classify_va_fit_type(va, nva_start_addr, size);
+       if (WARN_ON_ONCE(type == NOTHING_FIT))
                return vend;
 
        /* Update the free vmap_area. */
-       ret = __adjust_va_to_fit_type(va, nva_start_addr, size, fit_type);
+       ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
        if (ret)
                return vend;
 
@@ -983,7 +978,7 @@ retry:
        va->va_start = addr;
        va->va_end = addr + size;
        va->flags = 0;
-       __insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+       insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 
        spin_unlock(&vmap_area_lock);
 
@@ -1037,12 +1032,12 @@ static void __free_vmap_area(struct vmap
        /*
         * Remove from the busy tree/list.
         */
-       __unlink_va(va, &vmap_area_root);
+       unlink_va(va, &vmap_area_root);
 
        /*
         * Merge VA with its neighbors, otherwise just add it.
         */
-       __merge_or_add_vmap_area(va,
+       merge_or_add_vmap_area(va,
                &free_vmap_area_root, &free_vmap_area_list);
 }
 
@@ -1725,11 +1720,14 @@ static void vmap_init_free_space(void)
        list_for_each_entry(busy, &vmap_area_list, list) {
                if (busy->va_start - vmap_start > 0) {
                        free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-                       free->va_start = vmap_start;
-                       free->va_end = busy->va_start;
-
-                       __insert_vmap_area_augment(free, NULL,
-                               &free_vmap_area_root, &free_vmap_area_list);
+                       if (!WARN_ON_ONCE(!free)) {
+                               free->va_start = vmap_start;
+                               free->va_end = busy->va_start;
+
+                               insert_vmap_area_augment(free, NULL,
+                                       &free_vmap_area_root,
+                                       &free_vmap_area_list);
+                       }
                }
 
                vmap_start = busy->va_end;
@@ -1737,11 +1735,14 @@
 
        if (vmap_end - vmap_start > 0) {
                free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
-               free->va_start = vmap_start;
-               free->va_end = vmap_end;
+               if (!WARN_ON_ONCE(!free)) {
+                       free->va_start = vmap_start;
+                       free->va_end = vmap_end;
 
-               __insert_vmap_area_augment(free, NULL,
-                       &free_vmap_area_root, &free_vmap_area_list);
+                       insert_vmap_area_augment(free, NULL,
+                               &free_vmap_area_root,
+                               &free_vmap_area_list);
+               }
        }
 }
 
@@ -1771,11 +1772,14 @@ void __init vmalloc_init(void)
        /* Import existing vmlist entries. */
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
+               if (WARN_ON_ONCE(!va))
+                       continue;
+
                va->flags = VM_VM_AREA;
                va->va_start = (unsigned long)tmp->addr;
                va->va_end = va->va_start + tmp->size;
                va->vm = tmp;
-               __insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+               insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
        }
 
        /*
@@ -3021,17 +3025,15 @@ pvm_determine_end_from_reverse(struct vm
        unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
        unsigned long addr;
 
-       if (unlikely(!(*va)))
-               goto leave;
-
-       list_for_each_entry_from_reverse((*va),
-                       &free_vmap_area_list, list) {
-               addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
-               if ((*va)->va_start < addr)
-                       return addr;
+       if (likely(*va)) {
+               list_for_each_entry_from_reverse((*va),
+                               &free_vmap_area_list, list) {
+                       addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
+                       if ((*va)->va_start < addr)
+                               return addr;
+               }
        }
 
-leave:
        return 0;
 }
 
@@ -3070,7 +3072,7 @@ struct vm_struct **pcpu_get_vm_areas(con
        int area, area2, last_area, term_area;
        unsigned long base, start, size, end, last_end;
        bool purged = false;
-       u8 fit_type;
+       enum fit_type type;
 
        /* verify parameters and allocate data structures */
        BUG_ON(offset_in_page(align) || !is_power_of_2(align));
@@ -3171,12 +3173,12 @@ retry:
                        /* It is a BUG(), but trigger recovery instead. */
                        goto recovery;
 
-               fit_type = __classify_va_fit_type(va, start, size);
-               if (WARN_ON_ONCE(fit_type == NOTHING_FIT))
+               type = classify_va_fit_type(va, start, size);
+               if (WARN_ON_ONCE(type == NOTHING_FIT))
                        /* It is a BUG(), but trigger recovery instead. */
                        goto recovery;
 
-               ret = __adjust_va_to_fit_type(va, start, size, fit_type);
+               ret = adjust_va_to_fit_type(va, start, size, type);
                if (unlikely(ret))
                        goto recovery;
 
@@ -3185,7 +3187,7 @@ retry:
                va->va_start = start;
                va->va_end = start + size;
 
-               __insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
+               insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
        }
 
        spin_unlock(&vmap_area_lock);
_

Patches currently in -mm which might be from urezki@xxxxxxxxx are

mm-vmap-keep-track-of-free-blocks-for-vmap-allocation.patch
mm-vmap-add-debug_augment_propagate_check-macro.patch
mm-vmap-add-debug_augment_propagate_check-macro-v4.patch
mm-vmap-add-debug_augment_lowest_match_check-macro.patch
mm-vmap-add-debug_augment_lowest_match_check-macro-v4.patch