Add a crypto acomp poll() implementation so that callers can use true
async IAA compress/decompress without interrupts.

To use this mode with zswap, select the 'async' iaa_crypto driver
sync_mode:

  echo async > /sys/bus/dsa/drivers/crypto/sync_mode

This causes the iaa_crypto driver to register its acomp_alg
implementation with a non-NULL poll() member; callers such as zswap can
check for its presence and, if found, use true async mode.

Signed-off-by: Tom Zanussi <tom.zanussi@xxxxxxxxxxxxxxx>
Signed-off-by: Kanchana P Sridhar <kanchana.p.sridhar@xxxxxxxxx>
---
 drivers/crypto/intel/iaa/iaa_crypto_main.c | 74 ++++++++++++++++++++++
 1 file changed, 74 insertions(+)

diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c
index 237f87000070..6a8577ac1330 100644
--- a/drivers/crypto/intel/iaa/iaa_crypto_main.c
+++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -1788,6 +1788,74 @@ static void compression_ctx_init(struct iaa_compression_ctx *ctx)
 	ctx->use_irq = use_irq;
 }
 
+static int iaa_comp_poll(struct acomp_req *req)
+{
+	struct idxd_desc *idxd_desc;
+	struct idxd_device *idxd;
+	struct iaa_wq *iaa_wq;
+	struct pci_dev *pdev;
+	struct device *dev;
+	struct idxd_wq *wq;
+	bool compress_op;
+	int ret;
+
+	idxd_desc = req->base.data;
+	if (!idxd_desc)
+		return -EAGAIN;
+
+	compress_op = (idxd_desc->iax_hw->opcode == IAX_OPCODE_COMPRESS);
+	wq = idxd_desc->wq;
+	iaa_wq = idxd_wq_get_private(wq);
+	idxd = iaa_wq->iaa_device->idxd;
+	pdev = idxd->pdev;
+	dev = &pdev->dev;
+
+	ret = check_completion(dev, idxd_desc->iax_completion, true, true);
+	if (ret == -EAGAIN)
+		return ret;
+	if (ret)
+		goto out;
+
+	req->dlen = idxd_desc->iax_completion->output_size;
+
+	/* Update stats */
+	if (compress_op) {
+		update_total_comp_bytes_out(req->dlen);
+		update_wq_comp_bytes(wq, req->dlen);
+	} else {
+		update_total_decomp_bytes_in(req->slen);
+		update_wq_decomp_bytes(wq, req->slen);
+	}
+
+	if (iaa_verify_compress && (idxd_desc->iax_hw->opcode == IAX_OPCODE_COMPRESS)) {
+		struct crypto_tfm *tfm = req->base.tfm;
+		dma_addr_t src_addr, dst_addr;
+		u32 compression_crc;
+
+		compression_crc = idxd_desc->iax_completion->crc;
+
+		dma_sync_sg_for_device(dev, req->dst, 1, DMA_FROM_DEVICE);
+		dma_sync_sg_for_device(dev, req->src, 1, DMA_TO_DEVICE);
+
+		src_addr = sg_dma_address(req->src);
+		dst_addr = sg_dma_address(req->dst);
+
+		ret = iaa_compress_verify(tfm, req, wq, src_addr, req->slen,
+					  dst_addr, &req->dlen, compression_crc);
+	}
+out:
+	/* caller doesn't call crypto_wait_req, so no acomp_request_complete() */
+
+	dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE);
+	dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE);
+
+	idxd_free_desc(idxd_desc->wq, idxd_desc);
+
+	dev_dbg(dev, "%s: returning ret=%d\n", __func__, ret);
+
+	return ret;
+}
+
 static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
 {
 	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
@@ -1813,6 +1881,7 @@ static struct acomp_alg iaa_acomp_fixed_deflate = {
 	.compress		= iaa_comp_acompress,
 	.decompress		= iaa_comp_adecompress,
 	.dst_free		= dst_free,
+	.poll			= iaa_comp_poll,
 	.base			= {
 		.cra_name		= "deflate",
 		.cra_driver_name	= "deflate-iaa",
@@ -1827,6 +1896,11 @@ static int iaa_register_compression_device(void)
 {
 	int ret;
 
+	if (async_mode && !use_irq)
+		iaa_acomp_fixed_deflate.poll = iaa_comp_poll;
+	else
+		iaa_acomp_fixed_deflate.poll = NULL;
+
 	ret = crypto_register_acomp(&iaa_acomp_fixed_deflate);
 	if (ret) {
 		pr_err("deflate algorithm acomp fixed registration failed (%d)\n", ret);
-- 
2.27.0