The SLAB_MEM_SPREAD flag used to be implemented in SLAB, which was
removed. SLUB instead relies on the page allocator's NUMA policies.
Change the flag's value to 0 to free up the value it had, and mark it
for full removal once all users are gone.

Reported-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
Closes: https://lore.kernel.org/all/20240131172027.10f64405@xxxxxxxxxxxxxxxxxx/
Reviewed-and-tested-by: Xiongwei Song <xiongwei.song@xxxxxxxxxxxxx>
Reviewed-by: Chengming Zhou <chengming.zhou@xxxxxxxxx>
Reviewed-by: Roman Gushchin <roman.gushchin@xxxxxxxxx>
Signed-off-by: Vlastimil Babka <vbabka@xxxxxxx>
---
 include/linux/slab.h | 5 +++--
 mm/slab.h            | 1 -
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index b5f5ee8308d0..b1675ff6b904 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -96,8 +96,6 @@
  */
 /* Defer freeing slabs to RCU */
 #define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
-/* Spread some memory over cpuset */
-#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
 /* Trace allocations and frees */
 #define SLAB_TRACE		((slab_flags_t __force)0x00200000U)
 
@@ -164,6 +162,9 @@
 #endif
 #define SLAB_TEMPORARY	SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
+/* Obsolete unused flag, to be removed */
+#define SLAB_MEM_SPREAD		((slab_flags_t __force)0U)
+
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
diff --git a/mm/slab.h b/mm/slab.h
index 54deeb0428c6..f4534eefb35d 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -469,7 +469,6 @@ static inline bool is_kmalloc_cache(struct kmem_cache *s)
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
-			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
-- 
2.43.2
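
For illustration only (not part of the patch): a hypothetical caller that
still passes the deprecated flag keeps compiling, but because SLAB_MEM_SPREAD
is now defined as 0, ORing it into the flags is a no-op and the call behaves
as if only SLAB_HWCACHE_ALIGN were passed. The "foo" cache name, object size,
and init function below are made up for this sketch.

	#include <linux/slab.h>
	#include <linux/init.h>
	#include <linux/errno.h>

	/* Hypothetical cache used only for this example. */
	static struct kmem_cache *foo_cachep;

	static int __init foo_init(void)
	{
		/*
		 * SLAB_MEM_SPREAD is now 0, so this call is equivalent to
		 * passing SLAB_HWCACHE_ALIGN alone; the flag can be dropped
		 * from such callers in follow-up cleanups.
		 */
		foo_cachep = kmem_cache_create("foo", 128, 0,
					       SLAB_HWCACHE_ALIGN | SLAB_MEM_SPREAD,
					       NULL);
		if (!foo_cachep)
			return -ENOMEM;
		return 0;
	}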