The patch titled
     Subject: mm/zswap: directly use percpu mutex and buffer in load/store
has been added to the -mm mm-unstable branch.  Its filename is
     mm-zswap-directly-use-percpu-mutex-and-buffer-in-load-store.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-zswap-directly-use-percpu-mutex-and-buffer-in-load-store.patch

This patch will later appear in the mm-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
Subject: mm/zswap: directly use percpu mutex and buffer in load/store
Date: Mon, 18 Dec 2023 11:50:36 +0000

Since the introduction of dstmem reuse in the load path, it is confusing
that we are now using acomp_ctx->dstmem and acomp_ctx->mutex for purposes
other than what the naming suggests.

Yosry suggested removing these two fields from acomp_ctx, directly using
zswap_dstmem and zswap_mutex in both the load and store paths, renaming
them, and adding proper comments above their definitions noting that they
are for generic percpu buffering on the load and store paths.

So this patch removes dstmem and mutex from acomp_ctx, renames
zswap_dstmem to zswap_buffer, and uses the percpu mutex and buffer
directly on the load and store paths.

Link: https://lkml.kernel.org/r/20231213-zswap-dstmem-v3-6-4eac09b94ece@xxxxxxxxxxxxx
Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
Suggested-by: Yosry Ahmed <yosryahmed@xxxxxxxxxx>
Cc: Chris Li <chrisl@xxxxxxxxxx>
Cc: Dan Streetman <ddstreet@xxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: Nhat Pham <nphamcs@xxxxxxxxx>
Cc: Seth Jennings <sjenning@xxxxxxxxxx>
Cc: Vitaly Wool <vitaly.wool@xxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/zswap.c | 69 +++++++++++++++++++++++++++------------------
 1 file changed, 37 insertions(+), 32 deletions(-)

--- a/mm/zswap.c~mm-zswap-directly-use-percpu-mutex-and-buffer-in-load-store
+++ a/mm/zswap.c
@@ -166,8 +166,6 @@ struct crypto_acomp_ctx {
 	struct crypto_acomp *acomp;
 	struct acomp_req *req;
 	struct crypto_wait wait;
-	u8 *dstmem;
-	struct mutex *mutex;
 };
 
 /*
@@ -694,7 +692,7 @@ static void zswap_alloc_shrinker(struct
 /*********************************
 * per-cpu code
 **********************************/
-static DEFINE_PER_CPU(u8 *, zswap_dstmem);
+static DEFINE_PER_CPU(u8 *, zswap_buffer);
 /*
  * If users dynamically change the zpool type and compressor at runtime, i.e.
  * zswap is running, zswap can have more than one zpool on one cpu, but they
@@ -702,39 +700,39 @@ static DEFINE_PER_CPU(u8 *, zswap_dstmem
  */
 static DEFINE_PER_CPU(struct mutex *, zswap_mutex);
 
-static int zswap_dstmem_prepare(unsigned int cpu)
+static int zswap_buffer_prepare(unsigned int cpu)
 {
 	struct mutex *mutex;
-	u8 *dst;
+	u8 *buf;
 
-	dst = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
-	if (!dst)
+	buf = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));
+	if (!buf)
 		return -ENOMEM;
 
 	mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
 	if (!mutex) {
-		kfree(dst);
+		kfree(buf);
 		return -ENOMEM;
 	}
 
 	mutex_init(mutex);
-	per_cpu(zswap_dstmem, cpu) = dst;
+	per_cpu(zswap_buffer, cpu) = buf;
 	per_cpu(zswap_mutex, cpu) = mutex;
 	return 0;
 }
 
-static int zswap_dstmem_dead(unsigned int cpu)
+static int zswap_buffer_dead(unsigned int cpu)
 {
 	struct mutex *mutex;
-	u8 *dst;
+	u8 *buf;
 
 	mutex = per_cpu(zswap_mutex, cpu);
 	kfree(mutex);
 	per_cpu(zswap_mutex, cpu) = NULL;
 
-	dst = per_cpu(zswap_dstmem, cpu);
-	kfree(dst);
-	per_cpu(zswap_dstmem, cpu) = NULL;
+	buf = per_cpu(zswap_buffer, cpu);
+	kfree(buf);
+	per_cpu(zswap_buffer, cpu) = NULL;
 
 	return 0;
 }
 
@@ -772,9 +770,6 @@ static int zswap_cpu_comp_prepare(unsign
 	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 				   crypto_req_done, &acomp_ctx->wait);
 
-	acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
-	acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);
-
 	return 0;
 }
 
@@ -1397,15 +1392,21 @@ static void __zswap_load(struct zswap_en
 	struct zpool *zpool = zswap_find_zpool(entry);
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
-	u8 *src;
+	u8 *src, *buf;
+	int cpu;
+	struct mutex *mutex;
 
-	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-	mutex_lock(acomp_ctx->mutex);
+	cpu = raw_smp_processor_id();
+	mutex = per_cpu(zswap_mutex, cpu);
+	mutex_lock(mutex);
+
+	acomp_ctx = per_cpu_ptr(entry->pool->acomp_ctx, cpu);
 
 	src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
 	if (!zpool_can_sleep_mapped(zpool)) {
-		memcpy(acomp_ctx->dstmem, src, entry->length);
-		src = acomp_ctx->dstmem;
+		buf = per_cpu(zswap_buffer, cpu);
+		memcpy(buf, src, entry->length);
+		src = buf;
 		zpool_unmap_handle(zpool, entry->handle);
 	}
 
@@ -1415,7 +1416,7 @@ static void __zswap_load(struct zswap_en
 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
 	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
 	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
-	mutex_unlock(acomp_ctx->mutex);
+	mutex_unlock(mutex);
 
 	if (zpool_can_sleep_mapped(zpool))
 		zpool_unmap_handle(zpool, entry->handle);
@@ -1546,6 +1547,8 @@ bool zswap_store(struct folio *folio)
 	u8 *src, *dst;
 	gfp_t gfp;
 	int ret;
+	int cpu;
+	struct mutex *mutex;
 
 	VM_WARN_ON_ONCE(!folio_test_locked(folio));
 	VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
@@ -1631,11 +1634,13 @@ bool zswap_store(struct folio *folio)
 	}
 
 	/* compress */
-	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+	cpu = raw_smp_processor_id();
+	mutex = per_cpu(zswap_mutex, cpu);
+	mutex_lock(mutex);
 
-	mutex_lock(acomp_ctx->mutex);
+	acomp_ctx = per_cpu_ptr(entry->pool->acomp_ctx, cpu);
+	dst = per_cpu(zswap_buffer, cpu);
 
-	dst = acomp_ctx->dstmem;
 	sg_init_table(&input, 1);
 	sg_set_page(&input, page, PAGE_SIZE, 0);
 
@@ -1678,7 +1683,7 @@ bool zswap_store(struct folio *folio)
 	buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
 	memcpy(buf, dst, dlen);
 	zpool_unmap_handle(zpool, handle);
-	mutex_unlock(acomp_ctx->mutex);
+	mutex_unlock(mutex);
 
 	/* populate entry */
 	entry->swpentry = swp_entry(type, offset);
@@ -1721,7 +1726,7 @@ insert_entry:
 	return true;
 
 put_dstmem:
-	mutex_unlock(acomp_ctx->mutex);
+	mutex_unlock(mutex);
 put_pool:
 	zswap_pool_put(entry->pool);
 freepage:
@@ -1897,10 +1902,10 @@ static int zswap_setup(void)
 	}
 
 	ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
-				zswap_dstmem_prepare, zswap_dstmem_dead);
+				zswap_buffer_prepare, zswap_buffer_dead);
 	if (ret) {
-		pr_err("dstmem alloc failed\n");
-		goto dstmem_fail;
+		pr_err("buffer alloc failed\n");
+		goto buffer_fail;
 	}
 
 	ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
@@ -1935,7 +1940,7 @@ fallback_fail:
 	zswap_pool_destroy(pool);
 hp_fail:
 	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
-dstmem_fail:
+buffer_fail:
 	kmem_cache_destroy(zswap_entry_cache);
 cache_fail:
 	/* if built-in, we aren't unloaded on failure; don't allow use */
_

Patches currently in -mm which might be from zhouchengming@xxxxxxxxxxxxx are

mm-zswap-change-dstmem-size-to-one-page.patch
mm-zswap-reuse-dstmem-when-decompress.patch
mm-zswap-refactor-out-__zswap_load.patch
mm-zswap-cleanup-zswap_load.patch
mm-zswap-cleanup-zswap_writeback_entry.patch
mm-zswap-directly-use-percpu-mutex-and-buffer-in-load-store.patch
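
For readers skimming the diff, below is a minimal, self-contained sketch of
the percpu mutex/buffer pattern the patch switches the load and store paths
to.  It is not part of the patch: zswap_buffer_example() is a hypothetical
helper for illustration only, while the zswap_buffer/zswap_mutex names and
the per_cpu()/raw_smp_processor_id() usage follow the definitions shown in
the diff above.

/* Sketch only; assumes the percpu definitions introduced by the patch. */
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u8 *, zswap_buffer);		/* one page per CPU */
static DEFINE_PER_CPU(struct mutex *, zswap_mutex);	/* protects that page */

/* Hypothetical helper: copy @len bytes into the current CPU's buffer. */
static void zswap_buffer_example(const void *src, size_t len)
{
	int cpu = raw_smp_processor_id();
	struct mutex *mutex = per_cpu(zswap_mutex, cpu);
	u8 *buf;

	/*
	 * Take the mutex of the CPU we sampled, then use that same CPU's
	 * buffer under it; migrating after the lookup is harmless because
	 * both the mutex and the buffer are indexed by the same @cpu.
	 */
	mutex_lock(mutex);
	buf = per_cpu(zswap_buffer, cpu);
	memcpy(buf, src, len);		/* the real paths (de)compress here */
	mutex_unlock(mutex);
}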