Quoting Nikita Zhandarovich (2023-12-01 13:49:29)
> Macro dma_map_sg() may return 0 on error. This patch enables
> checks in case of the macro failure and ensures unmapping of
> previously mapped buffers with dma_unmap_sg().
>
> Found by Linux Verification Center (linuxtesting.org) with static
> analysis tool SVACE.
>
> Fixes: 49186a7d9e46 ("crypto: inside_secure - Avoid dma map if size is zero")
> Signed-off-by: Nikita Zhandarovich <n.zhandarovich@xxxxxxxxxx>

Reviewed-by: Antoine Tenart <atenart@xxxxxxxxxx>

Thanks!
Antoine

> ---
> v2: remove extra level of parentheses and
>     change return error code from -ENOMEM to EIO
>     per Antoine Tenart's <atenart@xxxxxxxxxx> suggestion
>
>  drivers/crypto/inside-secure/safexcel_cipher.c | 19 +++++++++++--------
>  1 file changed, 11 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
> index 272c28b5a088..b83818634ae4 100644
> --- a/drivers/crypto/inside-secure/safexcel_cipher.c
> +++ b/drivers/crypto/inside-secure/safexcel_cipher.c
> @@ -742,9 +742,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
>  				max(totlen_src, totlen_dst));
>  			return -EINVAL;
>  		}
> -		if (sreq->nr_src > 0)
> -			dma_map_sg(priv->dev, src, sreq->nr_src,
> -				   DMA_BIDIRECTIONAL);
> +		if (sreq->nr_src > 0 &&
> +		    !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_BIDIRECTIONAL))
> +			return -EIO;
>  	} else {
>  		if (unlikely(totlen_src && (sreq->nr_src <= 0))) {
>  			dev_err(priv->dev, "Source buffer not large enough (need %d bytes)!",
> @@ -752,8 +752,9 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
>  			return -EINVAL;
>  		}
>
> -		if (sreq->nr_src > 0)
> -			dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE);
> +		if (sreq->nr_src > 0 &&
> +		    !dma_map_sg(priv->dev, src, sreq->nr_src, DMA_TO_DEVICE))
> +			return -EIO;
>
>  		if (unlikely(totlen_dst && (sreq->nr_dst <= 0))) {
>  			dev_err(priv->dev, "Dest buffer not large enough (need %d bytes)!",
> @@ -762,9 +763,11 @@ static int safexcel_send_req(struct crypto_async_request *base, int ring,
>  			goto unmap;
>  		}
>
> -		if (sreq->nr_dst > 0)
> -			dma_map_sg(priv->dev, dst, sreq->nr_dst,
> -				   DMA_FROM_DEVICE);
> +		if (sreq->nr_dst > 0 &&
> +		    !dma_map_sg(priv->dev, dst, sreq->nr_dst, DMA_FROM_DEVICE)) {
> +			ret = -EIO;
> +			goto unmap;
> +		}
>  	}
>
>  	memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
>
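
For reference, the pattern the patch applies reduces to the sketch below
(illustrative only: the helper name and the dev/src/dst/nr_src/nr_dst
parameters are made up, not the driver's actual ones). dma_map_sg()
returns the number of mapped entries and 0 on failure, so a zero return
has to be treated as an error, and a mapping that already succeeded has
to be rolled back with dma_unmap_sg() before bailing out:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper showing the check-and-unwind pattern. */
static int map_src_dst(struct device *dev,
		       struct scatterlist *src, int nr_src,
		       struct scatterlist *dst, int nr_dst)
{
	/* A zero return from dma_map_sg() means the mapping failed. */
	if (nr_src > 0 &&
	    !dma_map_sg(dev, src, nr_src, DMA_TO_DEVICE))
		return -EIO;

	if (nr_dst > 0 &&
	    !dma_map_sg(dev, dst, nr_dst, DMA_FROM_DEVICE)) {
		/* Undo the source mapping before reporting the error. */
		if (nr_src > 0)
			dma_unmap_sg(dev, src, nr_src, DMA_TO_DEVICE);
		return -EIO;
	}

	return 0;
}

In the driver itself the dst failure path reuses the existing unmap
label for that cleanup, as the last hunk above shows.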