On Tue, Nov 08, 2022 at 11:32:05AM -0800, Nhat Pham wrote: > This helps determine the coldest zspages as candidates for writeback. > > Signed-off-by: Nhat Pham <nphamcs@xxxxxxxxx> > --- > mm/zsmalloc.c | 27 +++++++++++++++++++++++++++ > 1 file changed, 27 insertions(+) > > diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c > index 326faa751f0a..600c40121544 100644 > --- a/mm/zsmalloc.c > +++ b/mm/zsmalloc.c > @@ -239,6 +239,9 @@ struct zs_pool { > /* Compact classes */ > struct shrinker shrinker; > > + /* List tracking the zspages in LRU order by most recently added object */ > + struct list_head lru; > + > #ifdef CONFIG_ZSMALLOC_STAT > struct dentry *stat_dentry; > #endif > @@ -260,6 +263,10 @@ struct zspage { > unsigned int freeobj; > struct page *first_page; > struct list_head list; /* fullness list */ > + > + /* links the zspage to the lru list in the pool */ > + struct list_head lru; Please put the LRU logic under config ZSMALLOC_LRU, since we don't need the additional logic for other users.