The patch titled
     Subject: mm/zswap: reuse dstmem when decompress
has been added to the -mm mm-unstable branch.  Its filename is
     mm-zswap-reuse-dstmem-when-decompress.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-zswap-reuse-dstmem-when-decompress.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
Subject: mm/zswap: reuse dstmem when decompress
Date: Tue, 26 Dec 2023 15:54:09 +0000

In the !zpool_can_sleep_mapped() case, such as zsmalloc, we need to first
copy the entry->handle memory to a temporary buffer, which is allocated
with kmalloc on every call.

We can reuse the per-compressor dstmem instead and avoid that allocation
entirely: dstmem is percpu and per-compressor, and its use is already
serialized by the percpu acomp_ctx mutex.

Link: https://lkml.kernel.org/r/20231213-zswap-dstmem-v4-2-f228b059dd89@xxxxxxxxxxxxx
Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
Reviewed-by: Nhat Pham <nphamcs@xxxxxxxxx>
Reviewed-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
Acked-by: Chris Li <chrisl@xxxxxxxxxx> (Google)
Cc: Dan Streetman <ddstreet@xxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Seth Jennings <sjenning@xxxxxxxxxx>
Cc: Vitaly Wool <vitaly.wool@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/zswap.c |   44 ++++++++++++--------------------------------
 1 file changed, 12 insertions(+), 32 deletions(-)

--- a/mm/zswap.c~mm-zswap-reuse-dstmem-when-decompress
+++ a/mm/zswap.c
@@ -1417,19 +1417,13 @@ static int zswap_writeback_entry(struct
 	struct crypto_acomp_ctx *acomp_ctx;
 	struct zpool *pool = zswap_find_zpool(entry);
 	bool page_was_allocated;
-	u8 *src, *tmp = NULL;
+	u8 *src;
 	unsigned int dlen;
 	int ret;
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_NONE,
 	};
 
-	if (!zpool_can_sleep_mapped(pool)) {
-		tmp = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!tmp)
-			return -ENOMEM;
-	}
-
 	/* try to allocate swap cache page */
 	mpol = get_task_policy(current);
 	page = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
@@ -1465,15 +1459,15 @@ static int zswap_writeback_entry(struct
 	/* decompress */
 	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
 	dlen = PAGE_SIZE;
+	mutex_lock(acomp_ctx->mutex);
 
 	src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO);
 	if (!zpool_can_sleep_mapped(pool)) {
-		memcpy(tmp, src, entry->length);
-		src = tmp;
+		memcpy(acomp_ctx->dstmem, src, entry->length);
+		src = acomp_ctx->dstmem;
 		zpool_unmap_handle(pool, entry->handle);
 	}
 
-	mutex_lock(acomp_ctx->mutex);
 	sg_init_one(&input, src, entry->length);
 	sg_init_table(&output, 1);
 	sg_set_page(&output, page, PAGE_SIZE, 0);
@@ -1482,9 +1476,7 @@ static int zswap_writeback_entry(struct
 	dlen = acomp_ctx->req->dlen;
 	mutex_unlock(acomp_ctx->mutex);
 
-	if (!zpool_can_sleep_mapped(pool))
-		kfree(tmp);
-	else
+	if (zpool_can_sleep_mapped(pool))
 		zpool_unmap_handle(pool, entry->handle);
 
 	BUG_ON(ret);
@@ -1503,9 +1495,6 @@ static int zswap_writeback_entry(struct
 	return ret;
 
 fail:
-	if (!zpool_can_sleep_mapped(pool))
-		kfree(tmp);
-
 	/*
 	 * If we get here because the page is already in swapcache, a
 	 * load may be happening concurrently.  It is safe and okay to
@@ -1766,7 +1755,7 @@ bool zswap_load(struct folio *folio)
 	struct zswap_entry *entry;
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
-	u8 *src, *dst, *tmp;
+	u8 *src, *dst;
 	struct zpool *zpool;
 	unsigned int dlen;
 	bool ret;
@@ -1791,26 +1780,19 @@ bool zswap_load(struct folio *folio)
 	}
 
 	zpool = zswap_find_zpool(entry);
-	if (!zpool_can_sleep_mapped(zpool)) {
-		tmp = kmalloc(entry->length, GFP_KERNEL);
-		if (!tmp) {
-			ret = false;
-			goto freeentry;
-		}
-	}
 
 	/* decompress */
 	dlen = PAGE_SIZE;
-	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
+	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+	mutex_lock(acomp_ctx->mutex);
 
+	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
 	if (!zpool_can_sleep_mapped(zpool)) {
-		memcpy(tmp, src, entry->length);
-		src = tmp;
+		memcpy(acomp_ctx->dstmem, src, entry->length);
+		src = acomp_ctx->dstmem;
 		zpool_unmap_handle(zpool, entry->handle);
 	}
 
-	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-	mutex_lock(acomp_ctx->mutex);
 	sg_init_one(&input, src, entry->length);
 	sg_init_table(&output, 1);
 	sg_set_page(&output, page, PAGE_SIZE, 0);
@@ -1821,15 +1803,13 @@ bool zswap_load(struct folio *folio)
 
 	if (zpool_can_sleep_mapped(zpool))
 		zpool_unmap_handle(zpool, entry->handle);
-	else
-		kfree(tmp);
 
 	ret = true;
 stats:
 	count_vm_event(ZSWPIN);
 	if (entry->objcg)
 		count_objcg_event(entry->objcg, ZSWPIN);
-freeentry:
+
 	spin_lock(&tree->lock);
 	if (ret && zswap_exclusive_loads_enabled) {
 		zswap_invalidate_entry(tree, entry);
_

Patches currently in -mm which might be from zhouchengming@xxxxxxxxxxxxx are

mm-zswap-change-dstmem-size-to-one-page.patch
mm-zswap-reuse-dstmem-when-decompress.patch
mm-zswap-refactor-out-__zswap_load.patch
mm-zswap-cleanup-zswap_load.patch
mm-zswap-cleanup-zswap_writeback_entry.patch
mm-zswap-change-per-cpu-mutex-and-buffer-to-per-acomp_ctx.patch
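
For readers following along, the buffer-reuse pattern this patch applies
can be illustrated outside the kernel.  The sketch below is a minimal,
hypothetical userspace analogue, not zswap code: struct ctx, dstmem,
BUF_SIZE and decompress_entry() stand in for acomp_ctx,
acomp_ctx->dstmem, PAGE_SIZE and the real decompress path, and a pthread
mutex stands in for the percpu acomp_ctx mutex.

	/* Minimal sketch of the pattern; build with: cc -pthread */
	#include <pthread.h>
	#include <stdlib.h>
	#include <string.h>

	#define BUF_SIZE 4096			/* stand-in for PAGE_SIZE */

	struct ctx {				/* stand-in for acomp_ctx */
		pthread_mutex_t lock;		/* stand-in for the percpu mutex */
		unsigned char *dstmem;		/* preallocated, reused buffer */
	};

	static int ctx_init(struct ctx *c)
	{
		c->dstmem = malloc(BUF_SIZE);	/* allocated once, not per call */
		if (!c->dstmem)
			return -1;
		return pthread_mutex_init(&c->lock, NULL);
	}

	/*
	 * Old pattern: malloc a tmp buffer on every call, copy into it,
	 * then free it.  New pattern: take the context mutex first, then
	 * reuse c->dstmem.  The mutex already serializes all use of this
	 * context, so the buffer cannot be clobbered by a concurrent user.
	 */
	static void decompress_entry(struct ctx *c, const void *src, size_t len)
	{
		pthread_mutex_lock(&c->lock);
		memcpy(c->dstmem, src, len);
		/* ... decompress out of c->dstmem while holding the lock ... */
		pthread_mutex_unlock(&c->lock);
	}

The invariant is the same in both settings: the buffer is only touched
while holding the mutex that already serializes the compressor, so one
preallocated buffer per context is safe and the per-call kmalloc()/kfree()
pair is pure overhead.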