From: Vlastimil Babka <vbabka@xxxxxxx>
Subject: mm, slub: optionally save/restore irqs in slab_[un]lock()/

For PREEMPT_RT we will need to disable irqs for this bit spinlock.  As a
preparation, add a flags parameter, and an internal version that takes an
additional bool parameter to control irq saving/restoring (the flags
parameter is compile-time unused if the bool is a constant false).

Convert ___cmpxchg_double_slab(), which also comes with the same bool
parameter, to use the internal version.

Link: https://lkml.kernel.org/r/20210805152000.12817-32-vbabka@xxxxxxx
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Christoph Lameter <cl@xxxxxxxxx>
Cc: David Rientjes <rientjes@xxxxxxxxxx>
Cc: Jann Horn <jannh@xxxxxxxxxx>
Cc: Jesper Dangaard Brouer <brouer@xxxxxxxxxx>
Cc: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
Cc: Mel Gorman <mgorman@xxxxxxxxxxxxxxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Pekka Enberg <penberg@xxxxxxxxxx>
Cc: Sebastian Andrzej Siewior <bigeasy@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/slub.c |   52 +++++++++++++++++++++++++++++++++-------------------
 1 file changed, 33 insertions(+), 19 deletions(-)

--- a/mm/slub.c~mm-slub-optionally-save-restore-irqs-in-slab_lock
+++ a/mm/slub.c
@@ -359,16 +359,33 @@ static inline unsigned int oo_objects(st
 /*
  * Per slab locking using the pagelock
  */
-static __always_inline void slab_lock(struct page *page)
+static __always_inline void
+__slab_lock(struct page *page, unsigned long *flags, bool disable_irqs)
 {
 	VM_BUG_ON_PAGE(PageTail(page), page);
+	if (disable_irqs)
+		local_irq_save(*flags);
 	bit_spin_lock(PG_locked, &page->flags);
 }
 
-static __always_inline void slab_unlock(struct page *page)
+static __always_inline void
+__slab_unlock(struct page *page, unsigned long *flags, bool disable_irqs)
 {
 	VM_BUG_ON_PAGE(PageTail(page), page);
 	__bit_spin_unlock(PG_locked, &page->flags);
+	if (disable_irqs)
+		local_irq_restore(*flags);
+}
+
+static __always_inline void
+slab_lock(struct page *page, unsigned long *flags)
+{
+	__slab_lock(page, flags, false);
+}
+
+static __always_inline void slab_unlock(struct page *page, unsigned long *flags)
+{
+	__slab_unlock(page, flags, false);
 }
 
 static inline bool ___cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
@@ -388,23 +405,18 @@ static inline bool ___cmpxchg_double_sla
 	} else
 #endif
 	{
-		unsigned long flags;
+		/* init to 0 to prevent spurious warnings */
+		unsigned long flags = 0;
 
-		if (disable_irqs)
-			local_irq_save(flags);
-		slab_lock(page);
+		__slab_lock(page, &flags, disable_irqs);
 		if (page->freelist == freelist_old &&
 					page->counters == counters_old) {
 			page->freelist = freelist_new;
 			page->counters = counters_new;
-			slab_unlock(page);
-			if (disable_irqs)
-				local_irq_restore(flags);
+			__slab_unlock(page, &flags, disable_irqs);
 			return true;
 		}
-		slab_unlock(page);
-		if (disable_irqs)
-			local_irq_restore(flags);
+		__slab_unlock(page, &flags, disable_irqs);
 	}
 
 	cpu_relax();
@@ -1255,11 +1267,11 @@ static noinline int free_debug_processin
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 	void *object = head;
 	int cnt = 0;
-	unsigned long flags;
+	unsigned long flags, flags2;
 	int ret = 0;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	slab_lock(page);
+	slab_lock(page, &flags2);
 
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
 		if (!check_slab(s, page))
@@ -1292,7 +1304,7 @@ out:
 		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
 			 bulk_cnt, cnt);
 
-	slab_unlock(page);
+	slab_unlock(page, &flags2);
 	spin_unlock_irqrestore(&n->list_lock, flags);
 	if (!ret)
 		slab_fix(s, "Object at 0x%p not freed", object);
@@ -4070,9 +4082,10 @@ static void list_slab_objects(struct kme
 	void *addr = page_address(page);
 	unsigned long *map;
 	void *p;
+	unsigned long flags;
 
 	slab_err(s, page, text, s->name);
-	slab_lock(page);
+	slab_lock(page, &flags);
 
 	map = get_map(s, page);
 	for_each_object(p, s, addr, page->objects) {
@@ -4083,7 +4096,7 @@ static void list_slab_objects(struct kme
 		}
 	}
 	put_map(map);
-	slab_unlock(page);
+	slab_unlock(page, &flags);
 #endif
 }
 
@@ -4815,8 +4828,9 @@ static void validate_slab(struct kmem_ca
 {
 	void *p;
 	void *addr = page_address(page);
+	unsigned long flags;
 
-	slab_lock(page);
+	slab_lock(page, &flags);
 
 	if (!check_slab(s, page) || !on_freelist(s, page, NULL))
 		goto unlock;
@@ -4831,7 +4845,7 @@ static void validate_slab(struct kmem_ca
 			break;
 		}
 unlock:
-	slab_unlock(page);
+	slab_unlock(page, &flags);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
_
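
A minimal standalone sketch of the pattern the changelog relies on: because
the internal helpers are __always_inline and the bool is a compile-time
constant at each call site, the compiler folds the branch away and the flags
argument is dead on the non-irq path.  The demo_* names below are
hypothetical stand-ins for the kernel primitives (not kernel API), so the
idea can be compiled and run with a plain C compiler:

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned long irq_state = 1;	/* 1 == "irqs enabled" */

	/* stand-ins for local_irq_save()/local_irq_restore() */
	static inline void demo_irq_save(unsigned long *flags)
	{
		*flags = irq_state;
		irq_state = 0;
	}

	static inline void demo_irq_restore(unsigned long *flags)
	{
		irq_state = *flags;
	}

	/* internal version; callers pass a compile-time constant bool */
	static inline __attribute__((__always_inline__)) void
	demo_lock(unsigned long *flags, bool disable_irqs)
	{
		if (disable_irqs)	/* constant-folded per call site */
			demo_irq_save(flags);
		/* ... take the bit spinlock here ... */
	}

	static inline __attribute__((__always_inline__)) void
	demo_unlock(unsigned long *flags, bool disable_irqs)
	{
		/* ... drop the bit spinlock here ... */
		if (disable_irqs)
			demo_irq_restore(flags);
	}

	int main(void)
	{
		/* init to 0 to prevent spurious "used uninitialized" warnings */
		unsigned long flags = 0;

		demo_lock(&flags, false);	/* branch elided, flags untouched */
		demo_unlock(&flags, false);

		demo_lock(&flags, true);	/* saves "irq" state into flags */
		printf("locked, irqs off: irq_state=%lu\n", irq_state);
		demo_unlock(&flags, true);	/* restores it */
		printf("after unlock: irq_state=%lu\n", irq_state);
		return 0;
	}

With optimization enabled, the disable_irqs == false calls compile down to
the bare lock/unlock bodies, which is why the plain slab_lock()/slab_unlock()
wrappers above can pass a flags pointer that is never written on that path.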