When allocating a new memory area where the mapping address range is
known, the vmap_area lock is acquired twice.

The first acquisition is in alloc_vmap_area(), when the new vmap_area
is inserted into the busy red-black tree.  The second is in
setup_vmalloc_vm(), when the properties of the area's vm_struct, such
as the flags and address, are updated.

Eliminate the second acquisition by moving the insertion out of
alloc_vmap_area() into a new helper, insert_vmap_area_locked(), which
callers invoke only after setup_vmalloc_vm() has fully initialized the
area.  The vmap_area is thus published to the busy tree in a single
locked step, which improves scalability when the vmap_area lock is
contended.  Because the area is no longer in the busy tree when
kasan_populate_vmalloc() fails, the error path now returns it directly
to the free tree/list instead of calling free_vmap_area().

With this change, on an Intel Icelake platform (160 vCPUs, kernel
v6.7), stress-ng/pthread (https://github.com/ColinIanKing/stress-ng),
which stress-tests thread creation, gains a 6% performance improvement
and shows a 7% reduction in the overall spinlock hotspot.

Reviewed-by: Chen Tim C <tim.c.chen@xxxxxxxxx>
Reviewed-by: King Colin <colin.king@xxxxxxxxx>
Signed-off-by: rulinhuang <rulin.huang@xxxxxxxxx>
---
V1 -> V2:
	Avoided the partial initialization issue of vm and separated
	insert_vmap_area() from alloc_vmap_area()
V2 -> V3:
	Rebased on 6.8-rc5
V3 -> V4:
	Rebased on mm-unstable branch
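
As an aside for reviewers, below is a minimal userspace sketch of the
before/after locking pattern.  It is illustration only, not kernel
code: pthread spinlocks stand in for the per-node vn->busy.lock, and
the fake_* types and helper names are hypothetical stand-ins for
vmap_area/vm_struct, not kernel APIs.

	#include <pthread.h>
	#include <stdio.h>

	struct fake_vm { unsigned long flags; const void *caller; };
	struct fake_va { struct fake_va *next; struct fake_vm *vm; };

	static pthread_spinlock_t busy_lock;	/* stands in for vn->busy.lock */
	static struct fake_va *busy_list;	/* stands in for the busy tree */

	static void insert_busy(struct fake_va *va)
	{
		va->next = busy_list;
		busy_list = va;
	}

	/* Old flow: the contended lock is taken twice per allocation. */
	static void alloc_then_setup_twice(struct fake_va *va, struct fake_vm *vm)
	{
		pthread_spin_lock(&busy_lock);
		insert_busy(va);	/* insertion in alloc_vmap_area() */
		pthread_spin_unlock(&busy_lock);

		pthread_spin_lock(&busy_lock);
		va->vm = vm;		/* update in setup_vmalloc_vm() */
		pthread_spin_unlock(&busy_lock);
	}

	/* New flow: initialize first, then publish under one acquisition. */
	static void setup_then_insert_once(struct fake_va *va, struct fake_vm *vm)
	{
		va->vm = vm;	/* va is not reachable yet, no lock needed */

		pthread_spin_lock(&busy_lock);
		insert_busy(va);	/* single acquisition, fully set up va */
		pthread_spin_unlock(&busy_lock);
	}

	int main(void)
	{
		struct fake_vm vm1 = { 0 }, vm2 = { 0 };
		struct fake_va va1 = { 0 }, va2 = { 0 };

		pthread_spin_init(&busy_lock, PTHREAD_PROCESS_PRIVATE);
		alloc_then_setup_twice(&va1, &vm1);
		setup_then_insert_once(&va2, &vm2);
		printf("both areas published: %p %p\n",
		       (void *)busy_list, (void *)busy_list->next);
		pthread_spin_destroy(&busy_lock);
		return 0;
	}

The second variant is safe with a single acquisition only because the
area is fully initialized before it becomes reachable through the
shared structure, which is exactly what deferring the insertion until
after setup_vmalloc_vm() guarantees.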
---
 mm/vmalloc.c | 43 +++++++++++++++++++++++--------------------
 1 file changed, 23 insertions(+), 20 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 25a8df497255..ce126e7bc3d8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1851,7 +1851,6 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 				int node, gfp_t gfp_mask,
 				unsigned long va_flags)
 {
-	struct vmap_node *vn;
 	struct vmap_area *va;
 	unsigned long freed;
 	unsigned long addr;
@@ -1912,19 +1911,18 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	va->vm = NULL;
 	va->flags = (va_flags | vn_id);
 
-	vn = addr_to_node(va->va_start);
-
-	spin_lock(&vn->busy.lock);
-	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
-	spin_unlock(&vn->busy.lock);
-
 	BUG_ON(!IS_ALIGNED(va->va_start, align));
 	BUG_ON(va->va_start < vstart);
 	BUG_ON(va->va_end > vend);
 
 	ret = kasan_populate_vmalloc(addr, size);
 	if (ret) {
-		free_vmap_area(va);
+		/*
+		 * Insert/Merge it back to the free tree/list.
+		 */
+		spin_lock(&free_vmap_area_lock);
+		merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
+		spin_unlock(&free_vmap_area_lock);
 		return ERR_PTR(ret);
 	}
 
@@ -1953,6 +1951,15 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	return ERR_PTR(-EBUSY);
 }
 
+static inline void insert_vmap_area_locked(struct vmap_area *va)
+{
+	struct vmap_node *vn = addr_to_node(va->va_start);
+
+	spin_lock(&vn->busy.lock);
+	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
+	spin_unlock(&vn->busy.lock);
+}
+
 int register_vmap_purge_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_register(&vmap_notify_list, nb);
@@ -2492,6 +2499,8 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 		return ERR_CAST(va);
 	}
 
+	insert_vmap_area_locked(va);
+
 	vaddr = vmap_block_vaddr(va->va_start, 0);
 	spin_lock_init(&vb->lock);
 	vb->va = va;
@@ -2847,6 +2856,8 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node)
 		if (IS_ERR(va))
 			return NULL;
 
+		insert_vmap_area_locked(va);
+
 		addr = va->va_start;
 		mem = (void *)addr;
 	}
@@ -2946,7 +2957,7 @@ void __init vm_area_register_early(struct vm_struct *vm, size_t align)
 	kasan_populate_early_vm_area_shadow(vm->addr, vm->size);
 }
 
-static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
+static inline void setup_vmalloc_vm(struct vm_struct *vm,
 	struct vmap_area *va, unsigned long flags, const void *caller)
 {
 	vm->flags = flags;
@@ -2956,16 +2967,6 @@ static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
 	va->vm = vm;
 }
 
-static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-			      unsigned long flags, const void *caller)
-{
-	struct vmap_node *vn = addr_to_node(va->va_start);
-
-	spin_lock(&vn->busy.lock);
-	setup_vmalloc_vm_locked(vm, va, flags, caller);
-	spin_unlock(&vn->busy.lock);
-}
-
 static void clear_vm_uninitialized_flag(struct vm_struct *vm)
 {
 	/*
@@ -3010,6 +3011,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 
 	setup_vmalloc_vm(area, va, flags, caller);
 
+	insert_vmap_area_locked(va);
+
 	/*
 	 * Mark pages for non-VM_ALLOC mappings as accessible. Do it now as a
 	 * best-effort approach, as they can be mapped outside of vmalloc code.
@@ -4584,7 +4587,7 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
 
 		spin_lock(&vn->busy.lock);
 		insert_vmap_area(vas[area], &vn->busy.root, &vn->busy.head);
-		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
+		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
 				 pcpu_get_vm_areas);
 		spin_unlock(&vn->busy.lock);
 	}

base-commit: 9d193b36872d153e02e80c26203de4ee15127b58
-- 
2.39.3