If the crypto_acomp has a poll interface registered, zswap_compress()
and zswap_decompress() submit the acomp_req and then poll() for its
completion or error status in a busy-wait loop. This provides an
asynchronous way to manage (potentially multiple) acomp_reqs without
using interrupts, which the iaa_crypto driver supports. It also enables
batch submission of multiple compression/decompression jobs to the
Intel IAA hardware accelerator, which processes them in parallel,
followed by polling the batch's acomp_reqs for their completion status.

Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@xxxxxxxxx>
---
 mm/zswap.c | 51 +++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 39 insertions(+), 12 deletions(-)

diff --git a/mm/zswap.c b/mm/zswap.c
index f6316b66fb23..948c9745ee57 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -910,18 +910,34 @@ static bool zswap_compress(struct page *page, struct zswap_entry *entry,
 	acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
 
 	/*
-	 * it maybe looks a little bit silly that we send an asynchronous request,
-	 * then wait for its completion synchronously. This makes the process look
-	 * synchronous in fact.
-	 * Theoretically, acomp supports users send multiple acomp requests in one
-	 * acomp instance, then get those requests done simultaneously. but in this
-	 * case, zswap actually does store and load page by page, there is no
-	 * existing method to send the second page before the first page is done
-	 * in one thread doing zwap.
-	 * but in different threads running on different cpu, we have different
-	 * acomp instance, so multiple threads can do (de)compression in parallel.
+	 * If the crypto_acomp provides an asynchronous poll() interface,
+	 * submit the descriptor and poll for a completion status.
+	 *
+	 * It maybe looks a little bit silly that we send an asynchronous
+	 * request, then wait for its completion in a busy-wait poll loop, or,
+	 * synchronously. This makes the process look synchronous in fact.
+	 * Theoretically, acomp supports users send multiple acomp requests in
+	 * one acomp instance, then get those requests done simultaneously.
+	 * But in this case, zswap actually does store and load page by page,
+	 * there is no existing method to send the second page before the
+	 * first page is done in one thread doing zswap.
+	 * But in different threads running on different cpu, we have different
+	 * acomp instance, so multiple threads can do (de)compression in
+	 * parallel.
 	 */
-	comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
+	if (acomp_ctx->acomp->poll) {
+		comp_ret = crypto_acomp_compress(acomp_ctx->req);
+		if (comp_ret == -EINPROGRESS) {
+			do {
+				comp_ret = crypto_acomp_poll(acomp_ctx->req);
+				if (comp_ret && comp_ret != -EAGAIN)
+					break;
+			} while (comp_ret);
+		}
+	} else {
+		comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
+	}
+
 	dlen = acomp_ctx->req->dlen;
 	if (comp_ret)
 		goto unlock;
@@ -959,6 +975,7 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	struct scatterlist input, output;
 	struct crypto_acomp_ctx *acomp_ctx;
 	u8 *src;
+	int ret;
 
 	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
 	mutex_lock(&acomp_ctx->mutex);
@@ -984,7 +1001,17 @@ static void zswap_decompress(struct zswap_entry *entry, struct folio *folio)
 	sg_init_table(&output, 1);
 	sg_set_folio(&output, folio, PAGE_SIZE, 0);
 	acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
-	BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
+	if (acomp_ctx->acomp->poll) {
+		ret = crypto_acomp_decompress(acomp_ctx->req);
+		if (ret == -EINPROGRESS) {
+			do {
+				ret = crypto_acomp_poll(acomp_ctx->req);
+				BUG_ON(ret && ret != -EAGAIN);
+			} while (ret);
+		}
+	} else {
+		BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
+	}
 	BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
 
 	mutex_unlock(&acomp_ctx->mutex);
-- 
2.27.0
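
As an aside for reviewers: the submit-then-poll pattern duplicated in
the compress and decompress paths above could be captured in a single
helper along the following lines. This is only an illustrative sketch
in the context of mm/zswap.c, assuming the crypto_acomp poll()
interface introduced earlier in this series; the helper name
zswap_acomp_wait_or_poll() is hypothetical and not part of this patch.

/* Illustrative only: hypothetical helper, not part of this patch. */
static int zswap_acomp_wait_or_poll(struct crypto_acomp_ctx *acomp_ctx, int err)
{
	/*
	 * No poll() interface: fall back to the synchronous wait on the
	 * request's completion, exactly as the existing code does.
	 */
	if (!acomp_ctx->acomp->poll)
		return crypto_wait_req(err, &acomp_ctx->wait);

	/* -EINPROGRESS means the request was queued to the device. */
	if (err != -EINPROGRESS)
		return err;

	/* Busy-wait until the device reports success (0) or a real error. */
	do {
		err = crypto_acomp_poll(acomp_ctx->req);
	} while (err == -EAGAIN);

	return err;
}

The compress path would then reduce to something like:

	comp_ret = zswap_acomp_wait_or_poll(acomp_ctx,
					    crypto_acomp_compress(acomp_ctx->req));

with the decompress path handled analogously.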