allocate_slab() currently re-enables irqs before calling the page
allocator. It depends on gfpflags_allow_blocking() to determine if it's
safe to do so. Now we can instead simply restore irqs before calling it
through new_slab(). The other caller, early_kmem_cache_node_alloc(), is
unaffected.

Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 mm/slub.c | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 16f3cbb81627..47a3438d6a35 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1763,9 +1763,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	flags &= gfp_allowed_mask;
 
-	if (gfpflags_allow_blocking(flags))
-		local_irq_enable();
-
 	flags |= s->allocflags;
 
 	/*
@@ -1824,8 +1821,6 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	page->frozen = 1;
 
 out:
-	if (gfpflags_allow_blocking(flags))
-		local_irq_disable();
 	if (!page)
 		return NULL;
 
@@ -2753,17 +2748,18 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 		goto check_new_page;
 	}
 
+	local_irq_restore(flags);
 	migrate_enable();
 	page = new_slab(s, gfpflags, node);
 	migrate_disable();
 	c = this_cpu_ptr(s->cpu_slab);
 
 	if (unlikely(!page)) {
-		local_irq_restore(flags);
 		slab_out_of_memory(s, gfpflags, node);
 		return NULL;
 	}
 
+	local_irq_save(flags);
 	if (c->page)
 		flush_slab(s, c);
 
-- 
2.31.1
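
Note: for context, here is a simplified sketch of the allocation slow path in
___slab_alloc() as it looks after this patch. The names and calls are taken
from the last hunk above; everything else in the function is elided, so this
is illustrative rather than the literal upstream code:

	/*
	 * Sketch only: irqs are restored across the potentially-blocking
	 * new_slab() call and saved again before per-cpu state is used,
	 * so allocate_slab() no longer needs the gfpflags_allow_blocking()
	 * checks.
	 */
	local_irq_restore(flags);
	migrate_enable();
	page = new_slab(s, gfpflags, node);
	migrate_disable();
	c = this_cpu_ptr(s->cpu_slab);

	if (unlikely(!page)) {
		slab_out_of_memory(s, gfpflags, node);
		return NULL;
	}

	local_irq_save(flags);
	if (c->page)
		flush_slab(s, c);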