From: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>

Now we rely on the "frozen" bit to see if we should manipulate
slab->slab_list, which will be changed in the following patch.

Instead we introduce another way to keep track of whether the slab is
on the per-node partial list: here we reuse the PG_workingset bit.

Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
---
 include/linux/page-flags.h |  2 ++
 mm/slab.h                  | 19 +++++++++++++++++++
 mm/slub.c                  |  3 +++
 3 files changed, 24 insertions(+)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index a88e64acebfe..e8b1be71d722 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -478,6 +478,8 @@ PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
 	TESTCLEARFLAG(Active, active, PF_HEAD)
 PAGEFLAG(Workingset, workingset, PF_HEAD)
 	TESTCLEARFLAG(Workingset, workingset, PF_HEAD)
+	__SETPAGEFLAG(Workingset, workingset, PF_HEAD)
+	__CLEARPAGEFLAG(Workingset, workingset, PF_HEAD)
 __PAGEFLAG(Slab, slab, PF_NO_TAIL)
 PAGEFLAG(Checked, checked, PF_NO_COMPOUND)	   /* Used by some filesystems */
diff --git a/mm/slab.h b/mm/slab.h
index 8cd3294fedf5..9cff64cae8de 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -193,6 +193,25 @@ static inline void __slab_clear_pfmemalloc(struct slab *slab)
 	__folio_clear_active(slab_folio(slab));
 }

+/*
+ * SLUB reuses the PG_workingset bit to keep track of whether the slab
+ * is on the per-node partial list.
+ */
+static inline bool slab_test_node_partial(const struct slab *slab)
+{
+	return folio_test_workingset((struct folio *)slab_folio(slab));
+}
+
+static inline void slab_set_node_partial(struct slab *slab)
+{
+	__folio_set_workingset(slab_folio(slab));
+}
+
+static inline void slab_clear_node_partial(struct slab *slab)
+{
+	__folio_clear_workingset(slab_folio(slab));
+}
+
 static inline void *slab_address(const struct slab *slab)
 {
 	return folio_address(slab_folio(slab));
diff --git a/mm/slub.c b/mm/slub.c
index 63d281dfacdb..3fad4edca34b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2127,6 +2127,7 @@ __add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
 		list_add_tail(&slab->slab_list, &n->partial);
 	else
 		list_add(&slab->slab_list, &n->partial);
+	slab_set_node_partial(slab);
 }

 static inline void add_partial(struct kmem_cache_node *n,
@@ -2141,6 +2142,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
 {
 	lockdep_assert_held(&n->list_lock);
 	list_del(&slab->slab_list);
+	slab_clear_node_partial(slab);
 	n->nr_partial--;
 }

@@ -4831,6 +4833,7 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)

 		if (free == slab->objects) {
 			list_move(&slab->slab_list, &discard);
+			slab_clear_node_partial(slab);
 			n->nr_partial--;
 			dec_slabs_node(s, node, slab->objects);
 		} else if (free <= SHRINK_PROMOTE_MAX)
--
2.20.1
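
For context when reviewing the series, here is a minimal sketch (not part of
this patch) of how a later change could consult the new flag under
n->list_lock instead of the "frozen" bit before touching slab->slab_list.
The function example_unlink_if_on_partial() is purely hypothetical;
remove_partial(), slab_test_node_partial() and struct kmem_cache_node are the
existing SLUB pieces touched above.

/*
 * Hypothetical caller, for illustration only: unlink a slab from the
 * per-node partial list only if the new flag says it is actually on it,
 * rather than inferring that from the "frozen" bit.
 */
static void example_unlink_if_on_partial(struct kmem_cache_node *n,
					 struct slab *slab)
{
	unsigned long flags;

	spin_lock_irqsave(&n->list_lock, flags);
	/* PG_workingset on the slab folio means "on n->partial". */
	if (slab_test_node_partial(slab))
		remove_partial(n, slab);
	spin_unlock_irqrestore(&n->list_lock, flags);
}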