After commit f5ff79fddf0e ("dma-mapping: remove CONFIG_DMA_REMAP"), if the
algorithms are enabled, the driver crashes with a BUG_ON while executing
vunmap() in the context of a tasklet. This is because dma_free_coherent()
cannot be called in an interrupt context (see
Documentation/core-api/dma-api-howto.rst).

The functions qat_rsa_enc() and qat_rsa_dec() allocate memory with
dma_alloc_coherent() if the source or the destination buffers are made of
multiple flat buffers or are of a size that is not compatible with the
hardware. This memory is then freed with dma_free_coherent() in the
context of a tasklet invoked to handle the response for the corresponding
request.

Replace the dma_alloc_coherent() allocations in qat_rsa_enc() and
qat_rsa_dec() with kmalloc() + dma_map_single(), and the corresponding
dma_free_coherent() calls with dma_unmap_single() + kfree_sensitive(),
both of which are safe to call in atomic context.

Cc: stable@xxxxxxxxxxxxxxx
Fixes: a990532023b9 ("crypto: qat - Add support for RSA algorithm")
Signed-off-by: Giovanni Cabiddu <giovanni.cabiddu@xxxxxxxxx>
Reviewed-by: Adam Guerin <adam.guerin@xxxxxxxxx>
Reviewed-by: Wojciech Ziemba <wojciech.ziemba@xxxxxxxxx>
---
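Reviewer note (kept below the "---" so it stays out of the commit
message): the conversion in both functions follows the pattern sketched
here. This is a minimal, illustrative sketch, not code from the patch;
the helper names qat_buf_map()/qat_buf_unmap() are hypothetical, and the
driver open-codes the equivalent logic in qat_rsa_enc() and
qat_rsa_dec().

  #include <linux/dma-mapping.h>
  #include <linux/slab.h>

  /*
   * Allocate plain kernel memory and set up a streaming DMA mapping.
   * This replaces dma_alloc_coherent(), whose memory can only be
   * released with dma_free_coherent() outside of interrupt context.
   */
  static void *qat_buf_map(struct device *dev, size_t len,
  			 dma_addr_t *paddr, enum dma_data_direction dir)
  {
  	void *buf = kzalloc(len, GFP_KERNEL);

  	if (!buf)
  		return NULL;

  	*paddr = dma_map_single(dev, buf, len, dir);
  	if (dma_mapping_error(dev, *paddr)) {
  		kfree_sensitive(buf);
  		return NULL;
  	}
  	return buf;
  }

  /*
   * Tear-down path: unlike dma_free_coherent(), both calls below are
   * safe in atomic context, e.g. the tasklet that runs qat_rsa_cb().
   */
  static void qat_buf_unmap(struct device *dev, void *buf, size_t len,
  			  dma_addr_t paddr, enum dma_data_direction dir)
  {
  	dma_unmap_single(dev, paddr, len, dir);
  	kfree_sensitive(buf);
  }

kfree_sensitive() zeroes the buffer before freeing it, so the
intermediate copies of RSA inputs and outputs do not linger in freed
memory.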
 drivers/crypto/qat/qat_common/qat_asym_algs.c | 137 ++++++++----
 1 file changed, 60 insertions(+), 77 deletions(-)

diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c
index 9be972430f11..bba4b4d99e94 100644
--- a/drivers/crypto/qat/qat_common/qat_asym_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c
@@ -526,25 +526,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
 
 	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
 
-	if (req->src_align)
-		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align,
-				  req->in.rsa.enc.m);
-	else
-		dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
-				 DMA_TO_DEVICE);
+	kfree_sensitive(req->src_align);
+
+	dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
+			 DMA_TO_DEVICE);
 
 	areq->dst_len = req->ctx.rsa->key_sz;
 	if (req->dst_align) {
 		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
 					 areq->dst_len, 1);
 
-		dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align,
-				  req->out.rsa.enc.c);
-	} else {
-		dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
-				 DMA_FROM_DEVICE);
+		kfree_sensitive(req->dst_align);
 	}
 
+	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
+			 DMA_FROM_DEVICE);
+
 	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 	dma_unmap_single(dev, req->phy_out,
@@ -661,6 +658,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	u8 *vaddr;
 	int ret;
 
 	if (unlikely(!ctx->n || !ctx->e))
@@ -698,40 +696,39 @@
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src),
-						       req->src_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
-			return ret;
-
+		vaddr = sg_virt(req->src);
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->in.rsa.enc.m,
-							GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
 					 0, req->src_len, 0);
+		vaddr = qat_req->src_align;
 	}
-	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
-		qat_req->dst_align = NULL;
-		qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst),
-							req->dst_len,
-							DMA_FROM_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
-			goto unmap_src;
+
+	qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
+					       DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
+		goto unmap_src;
 
+	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+		qat_req->dst_align = NULL;
+		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->out.rsa.enc.c,
-							GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
-
+		vaddr = qat_req->dst_align;
 	}
+
+	qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
+						DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
+		goto unmap_dst;
+
 	qat_req->in.rsa.in_tab[3] = 0;
 	qat_req->out.rsa.out_tab[1] = 0;
 	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m,
@@ -769,21 +766,15 @@ static int qat_rsa_enc(struct akcipher_request *req)
 			 sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 unmap_dst:
-	if (qat_req->dst_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.rsa.enc.c);
-	else
-		if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
-			dma_unmap_single(dev, qat_req->out.rsa.enc.c,
-					 ctx->key_sz, DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
+		dma_unmap_single(dev, qat_req->out.rsa.enc.c,
+				 ctx->key_sz, DMA_FROM_DEVICE);
+	kfree_sensitive(qat_req->dst_align);
 unmap_src:
-	if (qat_req->src_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.rsa.enc.m);
-	else
-		if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
-			dma_unmap_single(dev, qat_req->in.rsa.enc.m,
-					 ctx->key_sz, DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
+		dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
+				 DMA_TO_DEVICE);
+	kfree_sensitive(qat_req->src_align);
 	return ret;
 }
 
@@ -796,6 +787,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
 	struct qat_asym_request *qat_req =
 			PTR_ALIGN(akcipher_request_ctx(req), 64);
 	struct icp_qat_fw_pke_request *msg = &qat_req->req;
+	u8 *vaddr;
 	int ret;
 
 	if (unlikely(!ctx->n || !ctx->d))
@@ -843,40 +835,37 @@
 	 */
 	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
 		qat_req->src_align = NULL;
-		qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src),
-						       req->dst_len, DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
-			return ret;
-
+		vaddr = sg_virt(req->src);
 	} else {
 		int shift = ctx->key_sz - req->src_len;
 
-		qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->in.rsa.dec.c,
-							GFP_KERNEL);
+		qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->src_align))
 			return ret;
 
 		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
 					 0, req->src_len, 0);
+		vaddr = qat_req->src_align;
 	}
-	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
-		qat_req->dst_align = NULL;
-		qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst),
-							req->dst_len,
-							DMA_FROM_DEVICE);
-
-		if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
-			goto unmap_src;
+
+	qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
+					       DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
+		goto unmap_src;
 
+	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
+		qat_req->dst_align = NULL;
+		vaddr = sg_virt(req->dst);
 	} else {
-		qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz,
-							&qat_req->out.rsa.dec.m,
-							GFP_KERNEL);
+		qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL);
 		if (unlikely(!qat_req->dst_align))
 			goto unmap_src;
-
+		vaddr = qat_req->dst_align;
 	}
 
+	qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
+						DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
+		goto unmap_dst;
+
 	if (ctx->crt_mode)
 		qat_req->in.rsa.in_tab[6] = 0;
@@ -922,21 +911,15 @@ static int qat_rsa_dec(struct akcipher_request *req)
 			 sizeof(struct qat_rsa_input_params),
 			 DMA_TO_DEVICE);
 unmap_dst:
-	if (qat_req->dst_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align,
-				  qat_req->out.rsa.dec.m);
-	else
-		if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
-			dma_unmap_single(dev, qat_req->out.rsa.dec.m,
-					 ctx->key_sz, DMA_FROM_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
+		dma_unmap_single(dev, qat_req->out.rsa.dec.m,
+				 ctx->key_sz, DMA_FROM_DEVICE);
+	kfree_sensitive(qat_req->dst_align);
 unmap_src:
-	if (qat_req->src_align)
-		dma_free_coherent(dev, ctx->key_sz, qat_req->src_align,
-				  qat_req->in.rsa.dec.c);
-	else
-		if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
-			dma_unmap_single(dev, qat_req->in.rsa.dec.c,
-					 ctx->key_sz, DMA_TO_DEVICE);
+	if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
+		dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
+				 DMA_TO_DEVICE);
+	kfree_sensitive(qat_req->src_align);
 	return ret;
 }
-- 
2.35.1