On 2/23/24 04:12, Christoph Lameter (Ampere) wrote: > On Tue, 20 Feb 2024, Vlastimil Babka wrote: > >> diff --git a/mm/slub.c b/mm/slub.c >> index 2ef88bbf56a3..a93c5a17cbbb 100644 >> --- a/mm/slub.c >> +++ b/mm/slub.c >> @@ -306,13 +306,13 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s) >> >> /* Internal SLUB flags */ >> /* Poison object */ >> -#define __OBJECT_POISON ((slab_flags_t __force)0x80000000U) >> +#define __OBJECT_POISON __SF_BIT(_SLAB_OBJECT_POISON) >> /* Use cmpxchg_double */ >> >> #ifdef system_has_freelist_aba >> -#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000U) >> +#define __CMPXCHG_DOUBLE __SF_BIT(_SLAB_CMPXCHG_DOUBLE) >> #else >> -#define __CMPXCHG_DOUBLE ((slab_flags_t __force)0U) >> +#define __CMPXCHG_DOUBLE 0 >> #endif > > Maybe its good to put these internal flags together with the other flags. > After all there is no other slab allocator available anymore and having > them all together avoids confusion. Good point, will do. Then I can also #undef the helper macro after the last flag.