The quilt patch titled
     Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
has been removed from the -mm tree.  Its filename was
     zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order.patch

This patch was dropped because an updated version will be merged

------------------------------------------------------
From: Nhat Pham <nphamcs@xxxxxxxxx>
Subject: zsmalloc: add a LRU to zs_pool to keep track of zspages in LRU order
Date: Wed, 26 Oct 2022 13:06:11 -0700

This helps determine the coldest zspages as candidates for writeback.

Link: https://lkml.kernel.org/r/20221026200613.1031261-4-nphamcs@xxxxxxxxx
Signed-off-by: Nhat Pham <nphamcs@xxxxxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Dan Streetman <ddstreet@xxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Nitin Gupta <ngupta@xxxxxxxxxx>
Cc: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
Cc: Seth Jennings <sjenning@xxxxxxxxxx>
Cc: Vitaly Wool <vitaly.wool@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/zsmalloc.c |   27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

--- a/mm/zsmalloc.c~zsmalloc-add-a-lru-to-zs_pool-to-keep-track-of-zspages-in-lru-order
+++ a/mm/zsmalloc.c
@@ -239,6 +239,9 @@ struct zs_pool {
 	/* Compact classes */
 	struct shrinker shrinker;
 
+	/* List tracking the zspages in LRU order by most recently added object */
+	struct list_head lru;
+
 #ifdef CONFIG_ZSMALLOC_STAT
 	struct dentry *stat_dentry;
 #endif
@@ -260,6 +263,10 @@ struct zspage {
 	unsigned int freeobj;
 	struct page *first_page;
 	struct list_head list; /* fullness list */
+
+	/* links the zspage to the lru list in the pool */
+	struct list_head lru;
+
 	struct zs_pool *pool;
 #ifdef CONFIG_COMPACTION
 	rwlock_t lock;
@@ -352,6 +359,16 @@ static void cache_free_zspage(struct zs_
 	kmem_cache_free(pool->zspage_cachep, zspage);
 }
 
+/* Moves the zspage to the front of the zspool's LRU */
+static void move_to_front(struct zs_pool *pool, struct zspage *zspage)
+{
+	assert_spin_locked(&pool->lock);
+
+	if (!list_empty(&zspage->lru))
+		list_del(&zspage->lru);
+	list_add(&zspage->lru, &pool->lru);
+}
+
 /* pool->lock(which owns the handle) synchronizes races */
 static void record_obj(unsigned long handle, unsigned long obj)
 {
@@ -953,6 +970,7 @@ static void free_zspage(struct zs_pool *
 	}
 
 	remove_zspage(class, zspage, ZS_EMPTY);
+	list_del(&zspage->lru);
 	__free_zspage(pool, class, zspage);
 }
 
@@ -998,6 +1016,8 @@ static void init_zspage(struct size_clas
 		off %= PAGE_SIZE;
 	}
 
+	INIT_LIST_HEAD(&zspage->lru);
+
 	set_freeobj(zspage, 0);
 }
 
@@ -1418,6 +1438,8 @@ unsigned long zs_malloc(struct zs_pool *
 		fix_fullness_group(class, zspage);
 		record_obj(handle, obj);
 		class_stat_inc(class, OBJ_USED, 1);
+		/* Move the zspage to front of pool's LRU */
+		move_to_front(pool, zspage);
 		spin_unlock(&pool->lock);
 
 		return handle;
@@ -1444,6 +1466,8 @@ unsigned long zs_malloc(struct zs_pool *
 
 	/* We completely set up zspage so mark them as movable */
 	SetZsPageMovable(pool, zspage);
+	/* Move the zspage to front of pool's LRU */
+	move_to_front(pool, zspage);
 	spin_unlock(&pool->lock);
 
 	return handle;
@@ -1967,6 +1991,7 @@ static void async_free_zspage(struct wor
 		VM_BUG_ON(fullness != ZS_EMPTY);
 		class = pool->size_class[class_idx];
 		spin_lock(&pool->lock);
+		list_del(&zspage->lru);
 		__free_zspage(pool, class, zspage);
 		spin_unlock(&pool->lock);
 	}
@@ -2278,6 +2303,8 @@ struct zs_pool *zs_create_pool(const cha
 	 */
 	zs_register_shrinker(pool);
 
+	INIT_LIST_HEAD(&pool->lru);
+
 	return pool;
 
 err:
_

Patches currently in -mm which might be from nphamcs@xxxxxxxxx are

zsmalloc-add-ops-fields-to-zs_pool-to-store-evict-handlers.patch
zsmalloc-implement-writeback-mechanism-for-zsmalloc.patch
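
Editor's note: the diff above only maintains the list; nothing in this
patch consumes it yet.  Because move_to_front() places the most recently
updated zspage at the head of pool->lru, the coldest zspage ends up at
the tail.  Below is a minimal sketch of how a writeback pass could pick
its victim.  The helper name zs_pool_pick_coldest() is hypothetical and
not part of this patch; the actual consumer is added by the follow-up
zsmalloc-implement-writeback-mechanism-for-zsmalloc.patch.

#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * Hypothetical sketch, not part of this patch: return the coldest
 * zspage, i.e. the tail of pool->lru.  This would live in
 * mm/zsmalloc.c, where struct zs_pool and struct zspage are defined.
 * Caller must hold pool->lock, matching the locking rule that
 * move_to_front() asserts.
 */
static struct zspage *zs_pool_pick_coldest(struct zs_pool *pool)
{
	assert_spin_locked(&pool->lock);

	if (list_empty(&pool->lru))
		return NULL;

	/* Head is most recently used; tail is least recently used. */
	return list_last_entry(&pool->lru, struct zspage, lru);
}

A caller would take pool->lock, pick the tail entry as above, write its
objects back, and then free the zspage, which removes it from the list
via the list_del() calls added in free_zspage()/async_free_zspage().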