This is a note to let you know that I've just added the patch titled

    crypto: sahara - fix cbc selftest failure

to the 6.1-stable tree which can be found at:

http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     crypto-sahara-fix-cbc-selftest-failure.patch
and it can be found in the queue-6.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.



commit 36ba6de5590333fbba7c5b30bf1c467eb25df0cc
Author: Ovidiu Panait <ovidiu.panait@xxxxxxxxxxxxx>
Date:   Fri Dec 1 19:06:20 2023 +0200

    crypto: sahara - fix cbc selftest failure

    [ Upstream commit 9f10bc28c0fb676ae58aa3bfa358db8f5de124bb ]

    The kernel crypto API requires that all CBC implementations update
    the IV buffer to contain the last ciphertext block.

    This fixes the following cbc selftest error:
    alg: skcipher: sahara-cbc-aes encryption test failed (wrong output IV)
    on test vector 0, cfg="in-place (one sglist)"

    Fixes: 5de8875281e1 ("crypto: sahara - Add driver for SAHARA2 accelerator.")
    Signed-off-by: Ovidiu Panait <ovidiu.panait@xxxxxxxxxxxxx>
    Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 0e30d36b0a71..aa9c45ff0f5a 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -149,6 +149,7 @@ struct sahara_ctx {
 
 struct sahara_aes_reqctx {
 	unsigned long mode;
+	u8 iv_out[AES_BLOCK_SIZE];
 	struct skcipher_request fallback_req;	// keep at the end
 };
 
@@ -542,8 +543,24 @@ static int sahara_hw_descriptor_create(struct sahara_dev *dev)
 	return -EINVAL;
 }
 
+static void sahara_aes_cbc_update_iv(struct skcipher_request *req)
+{
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+	struct sahara_aes_reqctx *rctx = skcipher_request_ctx(req);
+	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+	/* Update IV buffer to contain the last ciphertext block */
+	if (rctx->mode & FLAGS_ENCRYPT) {
+		sg_pcopy_to_buffer(req->dst, sg_nents(req->dst), req->iv,
+				   ivsize, req->cryptlen - ivsize);
+	} else {
+		memcpy(req->iv, rctx->iv_out, ivsize);
+	}
+}
+
 static int sahara_aes_process(struct skcipher_request *req)
 {
+	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
 	struct sahara_dev *dev = dev_ptr;
 	struct sahara_ctx *ctx;
 	struct sahara_aes_reqctx *rctx;
@@ -565,8 +582,17 @@ static int sahara_aes_process(struct skcipher_request *req)
 	rctx->mode &= FLAGS_MODE_MASK;
 	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
 
-	if ((dev->flags & FLAGS_CBC) && req->iv)
-		memcpy(dev->iv_base, req->iv, AES_KEYSIZE_128);
+	if ((dev->flags & FLAGS_CBC) && req->iv) {
+		unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+
+		memcpy(dev->iv_base, req->iv, ivsize);
+
+		if (!(dev->flags & FLAGS_ENCRYPT)) {
+			sg_pcopy_to_buffer(req->src, sg_nents(req->src),
+					   rctx->iv_out, ivsize,
+					   req->cryptlen - ivsize);
+		}
+	}
 
 	/* assign new context to device */
 	dev->ctx = ctx;
@@ -589,6 +615,9 @@ static int sahara_aes_process(struct skcipher_request *req)
 	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
 		DMA_TO_DEVICE);
 
+	if ((dev->flags & FLAGS_CBC) && req->iv)
+		sahara_aes_cbc_update_iv(req);
+
 	return 0;
 }
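
For readers following along, here is a minimal user-space sketch (an
illustration, not kernel code) of the CBC convention the commit message
describes: after an operation the IV buffer must hold the last
ciphertext block, and for in-place decryption that block has to be
saved from the source before it is overwritten, which is what the new
iv_out field and sahara_aes_cbc_update_iv() take care of in the driver.
The 16-byte XOR "cipher" below is a stand-in so the example stays
self-contained; only the IV bookkeeping mirrors the driver, and len is
assumed to be a multiple of the block size.

#include <stdint.h>
#include <string.h>

#define BLK 16

/* stand-in block cipher; XOR is its own inverse */
static void toy_block_crypt(uint8_t *b, const uint8_t *key)
{
	for (int i = 0; i < BLK; i++)
		b[i] ^= key[i];
}

static void toy_cbc_encrypt(uint8_t *buf, size_t len, uint8_t *iv,
			    const uint8_t *key)
{
	for (size_t off = 0; off < len; off += BLK) {
		for (int i = 0; i < BLK; i++)
			buf[off + i] ^= iv[i];
		toy_block_crypt(buf + off, key);
		/* chain: this ciphertext block becomes the next IV */
		memcpy(iv, buf + off, BLK);
	}
	/* on return, iv already holds the last ciphertext block */
}

static void toy_cbc_decrypt(uint8_t *buf, size_t len, uint8_t *iv,
			    const uint8_t *key)
{
	uint8_t iv_out[BLK];

	/*
	 * In-place decryption overwrites the ciphertext, so save the
	 * last ciphertext block up front, as the driver does into
	 * rctx->iv_out before starting the hardware operation.
	 */
	memcpy(iv_out, buf + len - BLK, BLK);

	for (size_t off = 0; off < len; off += BLK) {
		uint8_t prev[BLK];

		memcpy(prev, buf + off, BLK);
		toy_block_crypt(buf + off, key);
		for (int i = 0; i < BLK; i++)
			buf[off + i] ^= iv[i];
		memcpy(iv, prev, BLK);
	}

	/* hand the saved block back through the IV buffer */
	memcpy(iv, iv_out, BLK);
}

This is also why the patch treats the two directions differently: after
encryption the last ciphertext block already sits in req->dst, so
sahara_aes_cbc_update_iv() can copy it out with sg_pcopy_to_buffer(),
while decryption must snapshot it from req->src beforehand because an
in-place operation destroys it.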