Introduce ne_fit_preload()/ne_fit_preload_end() functions for preloading
one extra vmap_area object to ensure that we have it available when the
fit type is NE_FIT_TYPE. The preload is done per CPU and with the
permissive GFP_KERNEL allocation mask, which allows the code to be more
stable under low memory conditions and high memory pressure.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@xxxxxxxxx>
---
 mm/vmalloc.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 78 insertions(+), 3 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ea1b65fac599..5302e1b79c7b 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -364,6 +364,13 @@ static LIST_HEAD(free_vmap_area_list);
  */
 static struct rb_root free_vmap_area_root = RB_ROOT;
 
+/*
+ * Preload a CPU with one object for the "no edge" split case. The
+ * aim is to get rid of allocations from the atomic context, thus
+ * to use more permissive allocation masks.
+ */
+static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
+
 static __always_inline unsigned long
 va_size(struct vmap_area *va)
 {
@@ -950,9 +957,24 @@ adjust_va_to_fit_type(struct vmap_area *va,
		 *   L V  NVA  V R
		 * |---|-------|---|
		 */
-		lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
-		if (unlikely(!lva))
-			return -1;
+		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
+		if (unlikely(!lva)) {
+			/*
+			 * For the percpu allocator we do not do any pre-allocation
+			 * and leave it as it is. The reason is that it most likely
+			 * never ends up with NE_FIT_TYPE splitting. In case of
+			 * percpu allocations, offsets and sizes are aligned to a
+			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
+			 * are its main fitting cases.
+			 *
+			 * There are a few exceptions though; as an example, the
+			 * first allocation (early boot up) when we have "one"
+			 * big free space that has to be split.
+			 */
+			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
+			if (!lva)
+				return -1;
+		}
 
		/*
		 * Build the remainder.
@@ -1023,6 +1045,50 @@ __alloc_vmap_area(unsigned long size, unsigned long align,
 }
 
 /*
+ * Preload this CPU with one extra vmap_area object to ensure
+ * that we have it available when the fit type of the free area
+ * is NE_FIT_TYPE.
+ *
+ * The preload is done in non-atomic context, thus it allows us
+ * to use more permissive allocation masks and therefore to be
+ * more stable under low memory conditions and high memory pressure.
+ *
+ * On success, *preloaded is set to one and preemption is disabled.
+ * On allocation failure, *preloaded is set to zero and preemption
+ * stays enabled. Note it has to be paired with ne_fit_preload_end().
+ */
+static void
+ne_fit_preload(int *preloaded)
+{
+	preempt_disable();
+
+	if (!__this_cpu_read(ne_fit_preload_node)) {
+		struct vmap_area *node;
+
+		preempt_enable();
+		node = kmem_cache_alloc(vmap_area_cachep, GFP_KERNEL);
+		if (node == NULL) {
+			*preloaded = 0;
+			return;
+		}
+
+		preempt_disable();
+
+		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, node))
+			kmem_cache_free(vmap_area_cachep, node);
+	}
+
+	*preloaded = 1;
+}
+
+static void
+ne_fit_preload_end(int preloaded)
+{
+	if (preloaded)
+		preempt_enable();
+}
+
+/*
  * Allocate a region of KVA of the specified size and alignment, within the
  * vstart and vend.
 */
@@ -1034,6 +1100,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
	struct vmap_area *va;
	unsigned long addr;
	int purged = 0;
+	int preloaded;
 
	BUG_ON(!size);
	BUG_ON(offset_in_page(size));
@@ -1056,6 +1123,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
		kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
 
 retry:
+	/*
+	 * Even if the preload fails we do not really care about that.
+	 * Just proceed as it is; the "overflow" path will refill
+	 * the cache we allocate from.
+	 */
+	ne_fit_preload(&preloaded);
	spin_lock(&vmap_area_lock);
 
	/*
@@ -1063,6 +1136,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
	 * returned. Therefore trigger the overflow path.
	 */
	addr = __alloc_vmap_area(size, align, vstart, vend);
+	ne_fit_preload_end(preloaded);
+
	if (unlikely(addr == vend))
		goto overflow;
 
-- 
2.11.0
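
[Editor's note] For readers who want to experiment with the preload idea
outside the kernel tree, below is a minimal user-space sketch of the same
"preload one object before the critical section, consume it later" pattern.
It is not the kernel code: the per-CPU slot is modelled with a _Thread_local
pointer, kmem_cache_alloc(..., GFP_KERNEL) is modelled with malloc(), and the
preempt_disable()/__this_cpu_cmpxchg() dance is omitted because a thread
cannot migrate away from its own thread-local storage. All names here
(struct obj, preload(), take_preloaded()) are made up for illustration only.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct vmap_area; any payload would do. */
struct obj {
	int payload;
};

/* Stand-in for the per-CPU ne_fit_preload_node slot. */
static _Thread_local struct obj *preload_node;

/*
 * Cache one object in the thread-local slot using a "sleepable"
 * allocation. Returns 1 if an object is cached, 0 otherwise.
 */
static int preload(void)
{
	if (!preload_node)
		preload_node = malloc(sizeof(*preload_node));

	return preload_node != NULL;
}

/*
 * Consume the cached object if there is one, otherwise fall back
 * to a direct allocation (the GFP_NOWAIT path in the patch).
 */
static struct obj *take_preloaded(void)
{
	struct obj *node = preload_node;

	preload_node = NULL;
	if (!node)
		node = malloc(sizeof(*node));

	return node;
}

int main(void)
{
	int preloaded = preload();
	struct obj *node = take_preloaded();

	printf("preloaded=%d node=%p\n", preloaded, (void *)node);
	free(node);
	return 0;
}

The point is the same as in the patch: the expensive, sleepable allocation
happens before the lock is taken, so the NE_FIT_TYPE split under
vmap_area_lock only has to pick the object up.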