vb_alloc() unconditionally locks a vmap_block on the free list to check
the free space. This can be done locklessly because vmap_block::free
never increases; it is only decreased by allocations.

Check the free space locklessly and, only if that succeeds, recheck
under the lock.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
 mm/vmalloc.c |    5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2166,6 +2166,9 @@ static void *vb_alloc(unsigned long size
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 		unsigned long pages_off;
 
+		if (READ_ONCE(vb->free) < (1UL << order))
+			continue;
+
 		spin_lock(&vb->lock);
 		if (vb->free < (1UL << order)) {
 			spin_unlock(&vb->lock);
@@ -2174,7 +2177,7 @@ static void *vb_alloc(unsigned long size
 
 		pages_off = VMAP_BBMAP_BITS - vb->free;
 		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
-		vb->free -= 1UL << order;
+		WRITE_ONCE(vb->free, vb->free - (1UL << order));
 		bitmap_set(vb->used_map, pages_off, (1UL << order));
 		if (vb->free == 0) {
 			spin_lock(&vbq->lock);
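
As a side note, the check/recheck idiom the patch relies on can be sketched in
plain userspace C. The sketch below is illustrative only and not part of the
patch: a pthread mutex stands in for the vmap_block spinlock, C11 relaxed
atomics stand in for READ_ONCE()/WRITE_ONCE(), and the struct and function
names are made up. Because the counter only ever decreases under the lock, a
lockless read that shows insufficient space can never become sufficient later,
so skipping without locking is safe; a read that looks sufficient still has to
be rechecked under the lock.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct block {
	pthread_mutex_t lock;
	atomic_ulong free;		/* free pages; only decreases, under the lock */
};

/* Try to carve "want" pages out of the block; returns true on success. */
static bool block_alloc(struct block *b, unsigned long want)
{
	/* Lockless pre-check: cheaply skip blocks that are already too small. */
	if (atomic_load_explicit(&b->free, memory_order_relaxed) < want)
		return false;

	pthread_mutex_lock(&b->lock);
	/* Recheck: another thread may have allocated since the pre-check. */
	if (atomic_load_explicit(&b->free, memory_order_relaxed) < want) {
		pthread_mutex_unlock(&b->lock);
		return false;
	}
	atomic_fetch_sub_explicit(&b->free, want, memory_order_relaxed);
	pthread_mutex_unlock(&b->lock);
	return true;
}

int main(void)
{
	struct block b = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.free = 64,
	};

	printf("alloc 16: %d\n", block_alloc(&b, 16));	/* succeeds */
	printf("alloc 64: %d\n", block_alloc(&b, 64));	/* fails, only 48 left */
	return 0;
}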