On Tue, Jun 06, 2023 at 04:56:08PM +0200, Domenico Cerasuolo wrote:
> With the recent enhancement to zswap enabling direct page writeback, the
> need for the shrink code in zsmalloc has become obsolete. As a result,
> this commit removes the page reclaim logic from zsmalloc entirely.
>
> Signed-off-by: Domenico Cerasuolo <cerasuolodomenico@xxxxxxxxx>
> ---
>  mm/zsmalloc.c | 393 ++------------------------------------------------
>  1 file changed, 13 insertions(+), 380 deletions(-)
>
> diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
> index 02f7f414aade..75386283dba0 100644
> --- a/mm/zsmalloc.c
> +++ b/mm/zsmalloc.c
> @@ -107,21 +107,8 @@
>   */
>  #define OBJ_ALLOCATED_TAG 1
>
> -#ifdef CONFIG_ZPOOL
> -/*
> - * The second least-significant bit in the object's header identifies if the
> - * value stored at the header is a deferred handle from the last reclaim
> - * attempt.
> - *
> - * As noted above, this is valid because we have room for two bits.
> - */
> -#define OBJ_DEFERRED_HANDLE_TAG 2
> -#define OBJ_TAG_BITS 2
> -#define OBJ_TAG_MASK (OBJ_ALLOCATED_TAG | OBJ_DEFERRED_HANDLE_TAG)
> -#else
>  #define OBJ_TAG_BITS 1
>  #define OBJ_TAG_MASK OBJ_ALLOCATED_TAG
> -#endif /* CONFIG_ZPOOL */
>
>  #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
>  #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
> @@ -227,12 +214,6 @@ struct link_free {
>  		 * Handle of allocated object.
>  		 */
>  		unsigned long handle;
> -#ifdef CONFIG_ZPOOL
> -		/*
> -		 * Deferred handle of a reclaimed object.
> -		 */
> -		unsigned long deferred_handle;
> -#endif
>  	};
>  };
>
> @@ -250,13 +231,6 @@ struct zs_pool {
>  	/* Compact classes */
>  	struct shrinker shrinker;
>
> -#ifdef CONFIG_ZPOOL
> -	/* List tracking the zspages in LRU order by most recently added object */
> -	struct list_head lru;
> -	struct zpool *zpool;
> -	const struct zpool_ops *zpool_ops;
> -#endif
> -
>  #ifdef CONFIG_ZSMALLOC_STAT
>  	struct dentry *stat_dentry;
>  #endif
> @@ -279,13 +253,6 @@ struct zspage {
>  	unsigned int freeobj;
>  	struct page *first_page;
>  	struct list_head list; /* fullness list */
> -
> -#ifdef CONFIG_ZPOOL
> -	/* links the zspage to the lru list in the pool */
> -	struct list_head lru;
> -	bool under_reclaim;
> -#endif
> -
>  	struct zs_pool *pool;
>  	rwlock_t lock;
>  };
> @@ -393,14 +360,7 @@ static void *zs_zpool_create(const char *name, gfp_t gfp,
>  	 * different contexts and its caller must provide a valid
>  	 * gfp mask.
>  	 */
> -	struct zs_pool *pool = zs_create_pool(name);
> -
> -	if (pool) {
> -		pool->zpool = zpool;
> -		pool->zpool_ops = zpool_ops;
> -	}
> -
> -	return pool;
> +	return zs_create_pool(name);
>  }
>
>  static void zs_zpool_destroy(void *pool)
> @@ -422,27 +382,6 @@ static void zs_zpool_free(void *pool, unsigned long handle)
>  	zs_free(pool, handle);
>  }
>
> -static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries);
> -
> -static int zs_zpool_shrink(void *pool, unsigned int pages,
> -		unsigned int *reclaimed)
> -{
> -	unsigned int total = 0;
> -	int ret = -EINVAL;
> -
> -	while (total < pages) {
> -		ret = zs_reclaim_page(pool, 8);
> -		if (ret < 0)
> -			break;
> -		total++;
> -	}
> -
> -	if (reclaimed)
> -		*reclaimed = total;
> -
> -	return ret;
> -}
> -
>  static void *zs_zpool_map(void *pool, unsigned long handle,
>  		enum zpool_mapmode mm)
>  {
> @@ -481,7 +420,7 @@ static struct zpool_driver zs_zpool_driver = {
>  	.malloc_support_movable = true,
>  	.malloc = zs_zpool_malloc,
>  	.free = zs_zpool_free,
> -	.shrink = zs_zpool_shrink,
> +	.shrink = NULL,

You can simply delete the line instead, since NULL is the default behavior.

Other than that, super nice. Thanks, Domenico!

Acked-by: Minchan Kim <minchan@xxxxxxxxxx>
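For anyone skimming the thread, a minimal stand-alone sketch of why omitting the member is equivalent to writing ".shrink = NULL". The struct and names below are hypothetical, not the real struct zpool_driver: with a C designated initializer, any member that is not named is zero-initialized, and for a function pointer that means NULL.

    #include <stdio.h>

    /* Hypothetical mini-driver, standing in for a driver ops struct. */
    struct demo_driver {
            const char *type;
            int (*shrink)(void *pool, unsigned int pages,
                          unsigned int *reclaimed);
    };

    static struct demo_driver demo = {
            .type = "demo",
            /* .shrink intentionally omitted: unnamed members become 0/NULL */
    };

    int main(void)
    {
            printf("shrink callback is %s\n", demo.shrink ? "set" : "NULL");
            return 0;
    }

Running this prints "shrink callback is NULL", which is why dropping the assignment gives the same result as keeping it.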