On Wed, 22 May 2019 17:09:37 +0200 Uladzislau Rezki (Sony) wrote:

> /*
> + * Preload this CPU with one extra vmap_area object to ensure
> + * that we have it available when fit type of free area is
> + * NE_FIT_TYPE.
> + *
> + * The preload is done in non-atomic context, thus it allows us
> + * to use more permissive allocation masks, therefore to be more
> + * stable under low memory condition and high memory pressure.
> + *
> + * On success, *preloaded is set to one and preemption is left
> + * disabled. On allocation failure, *preloaded is set to zero and
> + * preemption is not disabled. Note it has to be paired with
> + * ne_fit_preload_end().
> + */
> +static void
> +ne_fit_preload(int *preloaded)
> +{
> +	preempt_disable();
> +
> +	if (!__this_cpu_read(ne_fit_preload_node)) {
> +		struct vmap_area *node;
> +
> +		preempt_enable();
> +		node = kmem_cache_alloc(vmap_area_cachep, GFP_KERNEL);

Alternatively, can you please take another look at the upside of using the
memory node parameter of alloc_vmap_area() for allocating the va slab
object, given that this preload, unlike adjust_va_to_fit_type(), is
invoked without the vmap_area_lock acquired? A rough, untested sketch of
what I mean is appended after my sign-off.

> +		if (node == NULL) {
> +			*preloaded = 0;
> +			return;
> +		}
> +
> +		preempt_disable();
> +
> +		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, node))
> +			kmem_cache_free(vmap_area_cachep, node);
> +	}
> +
> +	*preloaded = 1;
> +}
> +
> +static void
> +ne_fit_preload_end(int preloaded)
> +{
> +	if (preloaded)
> +		preempt_enable();
> +}
> +
> +/*
>  * Allocate a region of KVA of the specified size and alignment, within the
>  * vstart and vend.
>  */
> @@ -1034,6 +1100,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	struct vmap_area *va;
>  	unsigned long addr;
>  	int purged = 0;
> +	int preloaded;
>
>  	BUG_ON(!size);
>  	BUG_ON(offset_in_page(size));
> @@ -1056,6 +1123,12 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
>
>  retry:
> +	/*
> +	 * Even if it fails we do not really care about that.
> +	 * Just proceed as it is. "overflow" path will refill
> +	 * the cache we allocate from.
> +	 */
> +	ne_fit_preload(&preloaded);
>  	spin_lock(&vmap_area_lock);
>
>  	/*
> @@ -1063,6 +1136,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
>  	 * returned. Therefore trigger the overflow path.
>  	 */
>  	addr = __alloc_vmap_area(size, align, vstart, vend);
> +	ne_fit_preload_end(preloaded);
> +
>  	if (unlikely(addr == vend))
>  		goto overflow;
>
> --
> 2.11.0
>

Best Regards
Hillf
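
P.S. To make the suggestion above a bit more concrete, below is a rough and
completely untested sketch of the node-aware preload I have in mind. The
extra nid parameter and the changed call site are only my guess at how it
could look, not something taken from the patch; the idea is simply to switch
the slab allocation to kmem_cache_alloc_node() so the spare object comes
from the node that alloc_vmap_area() was asked to allocate on.

/*
 * Untested sketch: same preload logic as in the patch, but the spare
 * vmap_area object is allocated on the requested node instead of
 * whatever node the current CPU happens to be running on.
 */
static void
ne_fit_preload(int *preloaded, int nid)
{
	preempt_disable();

	if (!__this_cpu_read(ne_fit_preload_node)) {
		struct vmap_area *node;

		/*
		 * Re-enable preemption before calling into the slab
		 * allocator with GFP_KERNEL; no spinlock is held here,
		 * so sleeping is fine.
		 */
		preempt_enable();
		node = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, nid);
		if (node == NULL) {
			*preloaded = 0;
			return;
		}

		preempt_disable();

		/*
		 * The per-CPU slot may have been populated meanwhile
		 * (or we migrated); free the spare object in that case.
		 */
		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, node))
			kmem_cache_free(vmap_area_cachep, node);
	}

	*preloaded = 1;
}

The caller in alloc_vmap_area() would then simply pass its existing node
argument, i.e. ne_fit_preload(&preloaded, node), before taking the
vmap_area_lock.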