zswap_compress_folio() is modified to detect whether the pool's
acomp_ctx has "nr_reqs" greater than one, which will be the case if
the CPU onlining code has allocated multiple batching resources in the
acomp_ctx. If so, compress batching can be used with a batch size of
"acomp_ctx->nr_reqs" pages.

If compress batching can be used, zswap_compress_folio() invokes the
newly added zswap_batch_compress() to compress and store the folio in
batches of "acomp_ctx->nr_reqs" pages. With Intel IAA, the iaa_crypto
driver compresses each batch of pages in parallel in hardware.
zswap_batch_compress() thus performs the same computation for a batch
of pages that zswap_compress() performs for a single page, and returns
true if the batch was successfully compressed and stored, false
otherwise.

If the pool does not support compress batching, or the folio has only
one page, zswap_compress_folio() calls zswap_compress() for each
individual page in the folio, as before.

Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@xxxxxxxxx>
---
 mm/zswap.c | 122 +++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 113 insertions(+), 9 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index 6563d12e907b..f1cba77eda62 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -985,10 +985,11 @@ static void acomp_ctx_put_unlock(struct crypto_acomp_ctx *acomp_ctx)
 	mutex_unlock(&acomp_ctx->mutex);
 }
 
+/* The per-cpu @acomp_ctx mutex should be locked/unlocked in the caller. */
 static bool zswap_compress(struct page *page, struct zswap_entry *entry,
-			   struct zswap_pool *pool)
+			   struct zswap_pool *pool,
+			   struct crypto_acomp_ctx *acomp_ctx)
 {
-	struct crypto_acomp_ctx *acomp_ctx;
 	struct scatterlist input, output;
 	int comp_ret = 0, alloc_ret = 0;
 	unsigned int dlen = PAGE_SIZE;
@@ -998,7 +999,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	gfp_t gfp;
 	u8 *dst;
 
-	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
 	dst = acomp_ctx->buffers[0];
 	sg_init_table(&input, 1);
 	sg_set_page(&input, page, PAGE_SIZE, 0);
@@ -1051,7 +1051,6 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	else if (alloc_ret)
 		zswap_reject_alloc_fail++;
 
-	acomp_ctx_put_unlock(acomp_ctx);
 	return comp_ret == 0 && alloc_ret == 0;
 }
 
@@ -1509,20 +1508,125 @@ static void shrink_worker(struct work_struct *w)
  * main API
  **********************************/
 
+/* The per-cpu @acomp_ctx mutex should be locked/unlocked in the caller. */
+static bool zswap_batch_compress(struct folio *folio,
+				 long index,
+				 unsigned int batch_size,
+				 struct zswap_entry *entries[],
+				 struct zswap_pool *pool,
+				 struct crypto_acomp_ctx *acomp_ctx)
+{
+	int comp_errors[ZSWAP_MAX_BATCH_SIZE] = { 0 };
+	unsigned int dlens[ZSWAP_MAX_BATCH_SIZE];
+	struct page *pages[ZSWAP_MAX_BATCH_SIZE];
+	unsigned int i, nr_batch_pages;
+	bool ret = true;
+
+	nr_batch_pages = min((unsigned int)(folio_nr_pages(folio) - index), batch_size);
+
+	for (i = 0; i < nr_batch_pages; ++i) {
+		pages[i] = folio_page(folio, index + i);
+		dlens[i] = PAGE_SIZE;
+	}
+
+	/*
+	 * Batch compress @nr_batch_pages. If IAA is the compressor, the
+	 * hardware will compress @nr_batch_pages in parallel.
+	 */
+	ret = crypto_acomp_batch_compress(
+		acomp_ctx->reqs,
+		NULL,
+		pages,
+		acomp_ctx->buffers,
+		dlens,
+		comp_errors,
+		nr_batch_pages);
+
+	if (ret) {
+		/*
+		 * All batch pages were successfully compressed.
+		 * Store the pages in zpool.
+		 */
+		struct zpool *zpool = pool->zpool;
+		gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+
+		if (zpool_malloc_support_movable(zpool))
+			gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
+
+		for (i = 0; i < nr_batch_pages; ++i) {
+			unsigned long handle;
+			char *buf;
+			int err;
+
+			err = zpool_malloc(zpool, dlens[i], gfp, &handle);
+
+			if (err) {
+				if (err == -ENOSPC)
+					zswap_reject_compress_poor++;
+				else
+					zswap_reject_alloc_fail++;
+
+				ret = false;
+				break;
+			}
+
+			buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
+			memcpy(buf, acomp_ctx->buffers[i], dlens[i]);
+			zpool_unmap_handle(zpool, handle);
+
+			entries[i]->handle = handle;
+			entries[i]->length = dlens[i];
+		}
+	} else {
+		/* Some batch pages had compression errors. */
+		for (i = 0; i < nr_batch_pages; ++i) {
+			if (comp_errors[i]) {
+				if (comp_errors[i] == -ENOSPC)
+					zswap_reject_compress_poor++;
+				else
+					zswap_reject_compress_fail++;
+			}
+		}
+	}
+
+	return ret;
+}
+
 static bool zswap_compress_folio(struct folio *folio,
 				 struct zswap_entry *entries[],
 				 struct zswap_pool *pool)
 {
 	long index, nr_pages = folio_nr_pages(folio);
+	struct crypto_acomp_ctx *acomp_ctx;
+	unsigned int batch_size;
+	bool ret = true;
 
-	for (index = 0; index < nr_pages; ++index) {
-		struct page *page = folio_page(folio, index);
+	acomp_ctx = acomp_ctx_get_cpu_lock(pool);
+	batch_size = acomp_ctx->nr_reqs;
+
+	if ((batch_size > 1) && (nr_pages > 1)) {
+		for (index = 0; index < nr_pages; index += batch_size) {
+
+			if (!zswap_batch_compress(folio, index, batch_size,
+						  &entries[index], pool, acomp_ctx)) {
+				ret = false;
+				goto unlock_acomp_ctx;
+			}
+		}
+	} else {
+		for (index = 0; index < nr_pages; ++index) {
+			struct page *page = folio_page(folio, index);
 
-		if (!zswap_compress(page, entries[index], pool))
-			return false;
+			if (!zswap_compress(page, entries[index], pool, acomp_ctx)) {
+				ret = false;
+				goto unlock_acomp_ctx;
+			}
+		}
 	}
 
-	return true;
+unlock_acomp_ctx:
+	acomp_ctx_put_unlock(acomp_ctx);
+	return ret;
 }
 
 /*
-- 
2.27.0
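An illustrative note (not part of the patch): the batch walk in
zswap_compress_folio() steps through the folio in chunks of batch_size
and relies on the min() in zswap_batch_compress() to shorten the final
chunk. Below is a minimal userspace C sketch of just that arithmetic;
batch_compress() and the page counts are hypothetical stubs standing
in for the real compress-and-store step, not kernel APIs.

/*
 * Userspace sketch (illustrative only): walk nr_pages in chunks of
 * batch_size; the last chunk may be smaller, mirroring the min()
 * in zswap_batch_compress().
 */
#include <stdbool.h>
#include <stdio.h>

#define ZSWAP_MAX_BATCH_SIZE 8U

/* Stub for compressing one batch; always succeeds in this sketch. */
static bool batch_compress(long index, unsigned int nr_batch_pages)
{
	printf("compress pages [%ld..%ld] as one batch\n",
	       index, index + nr_batch_pages - 1);
	return true;
}

int main(void)
{
	long index, nr_pages = 21;	/* e.g. a 21-page folio */
	unsigned int batch_size = ZSWAP_MAX_BATCH_SIZE;

	for (index = 0; index < nr_pages; index += batch_size) {
		unsigned int remaining = (unsigned int)(nr_pages - index);
		unsigned int nr_batch_pages =
			remaining < batch_size ? remaining : batch_size;

		if (!batch_compress(index, nr_batch_pages))
			return 1;	/* mirror the early exit on failure */
	}
	return 0;
}

With a 21-page folio and a batch size of 8, this walks batches of 8, 8
and 5 pages, matching the partial-final-batch handling in the patch.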