From: Vlastimil Babka <vbabka@xxxxxxx>
Subject: mm, slub: stop disabling irqs around get_partial()

The function get_partial() does not need to have irqs disabled as a whole.
It's sufficient to convert spin_lock operations to their irq
saving/restoring versions.

As a result, it's now possible to reach the page allocator from the slab
allocator without disabling and re-enabling interrupts on the way.

Link: https://lkml.kernel.org/r/20210904105003.11688-18-vbabka@xxxxxxx
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: Qian Cai <quic_qiancai@xxxxxxxxxxx>
Cc: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/slub.c |   22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)

--- a/mm/slub.c~mm-slub-stop-disabling-irqs-around-get_partial
+++ a/mm/slub.c
@@ -2010,11 +2010,12 @@ static inline bool pfmemalloc_match(stru
  * Try to allocate a partial slab from a specific node.
  */
 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
-			      struct page **ret_page, gfp_t flags)
+			      struct page **ret_page, gfp_t gfpflags)
 {
 	struct page *page, *page2;
 	void *object = NULL;
 	unsigned int available = 0;
+	unsigned long flags;
 	int objects;
 
 	/*
@@ -2026,11 +2027,11 @@ static void *get_partial_node(struct kme
 	if (!n || !n->nr_partial)
 		return NULL;
 
-	spin_lock(&n->list_lock);
+	spin_lock_irqsave(&n->list_lock, flags);
 	list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
 		void *t;
 
-		if (!pfmemalloc_match(page, flags))
+		if (!pfmemalloc_match(page, gfpflags))
 			continue;
 
 		t = acquire_slab(s, n, page, object == NULL, &objects);
@@ -2051,7 +2052,7 @@ static void *get_partial_node(struct kme
 			break;
 
 	}
-	spin_unlock(&n->list_lock);
+	spin_unlock_irqrestore(&n->list_lock, flags);
 	return object;
 }
 
@@ -2779,8 +2780,10 @@ new_slab:
 			local_irq_restore(flags);
 			goto reread_page;
 		}
-		if (unlikely(!slub_percpu_partial(c)))
+		if (unlikely(!slub_percpu_partial(c))) {
+			local_irq_restore(flags);
 			goto new_objects; /* stolen by an IRQ handler */
+		}
 
 		page = c->page = slub_percpu_partial(c);
 		slub_set_percpu_partial(c, page);
@@ -2789,18 +2792,9 @@ new_slab:
 		goto redo;
 	}
 
-	local_irq_save(flags);
-	if (unlikely(c->page)) {
-		local_irq_restore(flags);
-		goto reread_page;
-	}
-
 new_objects:
 
-	lockdep_assert_irqs_disabled();
-
 	freelist = get_partial(s, gfpflags, node, &page);
-	local_irq_restore(flags);
 	if (freelist)
 		goto check_new_page;
 
_
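
For readers less familiar with the locking API used above, below is a minimal
standalone sketch of the conversion the changelog describes: a plain spin_lock
section becomes an irq saving/restoring one, so the caller no longer has to run
with irqs disabled. The struct and function names here are hypothetical and are
not part of this patch.

#include <linux/spinlock.h>
#include <linux/list.h>

struct demo_node {
	spinlock_t list_lock;
	struct list_head partial;
};

/* Hypothetical helper: detach the first entry, callable with irqs on or off. */
static bool demo_take_first(struct demo_node *n)
{
	unsigned long flags;
	bool taken = false;

	/* Saves the current irq state and disables irqs for the critical section... */
	spin_lock_irqsave(&n->list_lock, flags);
	if (!list_empty(&n->partial)) {
		list_del_init(n->partial.next);
		taken = true;
	}
	/* ...then restores whatever irq state the caller had. */
	spin_unlock_irqrestore(&n->list_lock, flags);

	return taken;
}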