The quilt patch titled
     Subject: mm/zsmalloc: remove migrate_write_lock_nested()
has been removed from the -mm tree.  Its filename was
     mm-zsmalloc-remove-migrate_write_lock_nested.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
Subject: mm/zsmalloc: remove migrate_write_lock_nested()
Date: Mon, 19 Feb 2024 13:33:52 +0000

The migrate write lock protects against races between zspage migration
and the map users of zspage objects.  We only need to lock out the map
users of the src zspage: the dst zspage is safe for users to map
concurrently, since migration only does obj_malloc() from the dst
zspage.  So the migrate_write_lock_nested() use case can be removed.

While we are here, clean up __zs_compact() by moving putback_zspage()
outside of migrate_write_unlock(): since we hold the pool lock at that
point, no malloc or free users can come in.

Link: https://lkml.kernel.org/r/20240219-b4-szmalloc-migrate-v1-2-34cd49c6545b@xxxxxxxxxxxxx
Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
Reviewed-by: Sergey Senozhatsky <senozhatsky@xxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Nhat Pham <nphamcs@xxxxxxxxx>
Cc: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/zsmalloc.c |   22 +++++-----------------
 1 file changed, 5 insertions(+), 17 deletions(-)

--- a/mm/zsmalloc.c~mm-zsmalloc-remove-migrate_write_lock_nested
+++ a/mm/zsmalloc.c
@@ -279,7 +279,6 @@ static void migrate_lock_init(struct zsp
 static void migrate_read_lock(struct zspage *zspage);
 static void migrate_read_unlock(struct zspage *zspage);
 static void migrate_write_lock(struct zspage *zspage);
-static void migrate_write_lock_nested(struct zspage *zspage);
 static void migrate_write_unlock(struct zspage *zspage);
 
 #ifdef CONFIG_COMPACTION
@@ -1727,11 +1726,6 @@ static void migrate_write_lock(struct zs
 	write_lock(&zspage->lock);
 }
 
-static void migrate_write_lock_nested(struct zspage *zspage)
-{
-	write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
-}
-
 static void migrate_write_unlock(struct zspage *zspage)
 {
 	write_unlock(&zspage->lock);
@@ -2003,19 +1997,17 @@ static unsigned long __zs_compact(struct
 			dst_zspage = isolate_dst_zspage(class);
 			if (!dst_zspage)
 				break;
-			migrate_write_lock(dst_zspage);
 		}
 
 		src_zspage = isolate_src_zspage(class);
 		if (!src_zspage)
 			break;
 
-		migrate_write_lock_nested(src_zspage);
-
+		migrate_write_lock(src_zspage);
 		migrate_zspage(pool, src_zspage, dst_zspage);
-		fg = putback_zspage(class, src_zspage);
 		migrate_write_unlock(src_zspage);
+		fg = putback_zspage(class, src_zspage);
 
 		if (fg == ZS_INUSE_RATIO_0) {
 			free_zspage(pool, class, src_zspage);
 			pages_freed += class->pages_per_zspage;
@@ -2025,7 +2017,6 @@ static unsigned long __zs_compact(struct
 		if (get_fullness_group(class, dst_zspage) == ZS_INUSE_RATIO_100
 		    || spin_is_contended(&pool->lock)) {
 			putback_zspage(class, dst_zspage);
-			migrate_write_unlock(dst_zspage);
 			dst_zspage = NULL;
 
 			spin_unlock(&pool->lock);
@@ -2034,15 +2025,12 @@ static unsigned long __zs_compact(struct
 		}
 	}
 
-	if (src_zspage) {
+	if (src_zspage)
 		putback_zspage(class, src_zspage);
-		migrate_write_unlock(src_zspage);
-	}
 
-	if (dst_zspage) {
+	if (dst_zspage)
 		putback_zspage(class, dst_zspage);
-		migrate_write_unlock(dst_zspage);
-	}
+
 	spin_unlock(&pool->lock);
 
 	return pages_freed;
_
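
To illustrate the locking rationale in the changelog above, here is a
minimal userspace model -- not the kernel code.  The struct layouts are
stand-ins, pthread locks stand in for the kernel's rwlock_t and
spinlock_t, and the function bodies only mirror the lock ordering used
in mm/zsmalloc.c after this patch:

/* Userspace sketch of the zsmalloc compaction locking (illustrative
 * stand-ins only; build with: cc -pthread sketch.c). */
#include <pthread.h>
#include <stdio.h>

struct zspage {
	pthread_rwlock_t lock;		/* models zspage->lock */
};

struct zs_pool {
	pthread_mutex_t lock;		/* models pool->lock */
};

/* Map users take the per-zspage lock for reading, so they only ever
 * contend with a writer that holds the SAME zspage's write lock. */
static void map_object(struct zspage *zspage)
{
	pthread_rwlock_rdlock(&zspage->lock);	/* migrate_read_lock() */
	/* ... access the mapped object ... */
	pthread_rwlock_unlock(&zspage->lock);	/* migrate_read_unlock() */
}

/* Compaction after this patch: write-lock the src zspage only.  The
 * dst zspage merely receives new objects via obj_malloc(), which is
 * already serialized by pool->lock, so concurrent map users of dst
 * are safe and no nested write lock is needed. */
static void compact_pair(struct zs_pool *pool, struct zspage *src,
			 struct zspage *dst)
{
	(void)dst;	/* dst needs no write lock in this model */

	pthread_mutex_lock(&pool->lock);	/* caller holds pool->lock */

	pthread_rwlock_wrlock(&src->lock);	/* migrate_write_lock(src) */
	/* migrate_zspage(): move objects src -> dst */
	pthread_rwlock_unlock(&src->lock);	/* migrate_write_unlock(src) */

	/* putback_zspage() can run here, after dropping src's write
	 * lock: pool->lock is still held, so no malloc/free can race. */

	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct zs_pool pool = { PTHREAD_MUTEX_INITIALIZER };
	struct zspage src = { PTHREAD_RWLOCK_INITIALIZER };
	struct zspage dst = { PTHREAD_RWLOCK_INITIALIZER };

	map_object(&dst);	/* safe even while dst is a migration target */
	compact_pair(&pool, &src, &dst);
	puts("lock ordering exercised");
	return 0;
}

The point of the model: a map user of dst never contends with the
migration writer, because the write lock is taken on src only, while
dst's consistency is covered by pool->lock.
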
Patches currently in -mm which might be from zhouchengming@xxxxxxxxxxxxx are

mm-zswap-global-lru-and-shrinker-shared-by-all-zswap_pools.patch
mm-zswap-change-zswap_pool-kref-to-percpu_ref.patch