From: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>

commit ed527b13d800dd515a9e6c582f0a73eca65b2e1b upstream.

The CAAM driver currently violates an undocumented and slightly
controversial requirement imposed by the crypto stack that a buffer
referred to by the request structure via its virtual address may not
be modified while any scatterlists passed via the same request
structure are mapped for inbound DMA.

This may result in errors like

  alg: aead: decryption failed on test 1 for gcm_base(ctr-aes-caam,ghash-generic): ret=74
  alg: aead: Failed to load transform for gcm(aes): -2

on non-cache coherent systems, due to the fact that the GCM driver
passes an IV buffer by virtual address which shares a cacheline with
the auth_tag buffer passed via a scatterlist, resulting in corruption
of the auth_tag when the IV is updated while the DMA mapping is live.

Since the IV that is returned to the caller is only valid for CBC mode,
and given that the in-kernel users of CBC (such as CTS) don't trigger
the same issue as the GCM driver, let's just disable the output IV
generation for all modes except CBC for the time being.

Fixes: 854b06f76879 ("crypto: caam - properly set IV after {en,de}crypt")
Cc: Horia Geanta <horia.geanta@xxxxxxx>
Cc: Iuliana Prodan <iuliana.prodan@xxxxxxx>
Reported-by: Sascha Hauer <s.hauer@xxxxxxxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Reviewed-by: Horia Geanta <horia.geanta@xxxxxxx>
Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
[ Horia: backported to 4.14, 4.19 ]
Signed-off-by: Horia Geantă <horia.geanta@xxxxxxx>
---
 drivers/crypto/caam/caamalg.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 9bc54c3c2cb9..1907945f82b7 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -887,6 +887,7 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 	struct ablkcipher_request *req = context;
 	struct ablkcipher_edesc *edesc;
 	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
 
 #ifdef DEBUG
@@ -911,10 +912,11 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
 
 	/*
 	 * The crypto API expects us to set the IV (req->info) to the last
-	 * ciphertext block. This is used e.g. by the CTS mode.
+	 * ciphertext block when running in CBC mode.
 	 */
-	scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
-				 ivsize, 0);
+	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+		scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+					 ivsize, ivsize, 0);
 
 	/* In case initial IV was generated, copy it in GIVCIPHER request */
 	if (edesc->iv_dir == DMA_FROM_DEVICE) {
@@ -1651,10 +1653,11 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
 
 	/*
 	 * The crypto API expects us to set the IV (req->info) to the last
-	 * ciphertext block.
+	 * ciphertext block when running in CBC mode.
 	 */
-	scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
-				 ivsize, 0);
+	if ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CBC)
+		scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+					 ivsize, ivsize, 0);
 
 	/* Create and submit job descriptor*/
 	init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
-- 
2.17.1
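
[Editor's note] For readers unfamiliar with the DMA constraint the commit
message refers to, the sketch below illustrates the hazard. It is not CAAM
or GCM code; the struct name, field names and sizes are hypothetical, chosen
only to show how a virtually-addressed IV can end up in the same cacheline
as a DMA-mapped auth_tag on a non-cache-coherent system.

	#include <linux/types.h>

	/*
	 * Hypothetical request context, for illustration only: both
	 * buffers fit in a single 64-byte cacheline.
	 */
	struct example_reqctx {
		u8 iv[16];	 /* passed to the cipher by virtual address      */
		u8 auth_tag[16]; /* passed to the cipher via a scatterlist entry */
	};

	/*
	 * On a non-cache-coherent system the failure sequence is roughly:
	 *
	 *  1. auth_tag is mapped for inbound DMA (DMA_FROM_DEVICE); the
	 *     cacheline covering iv and auth_tag is invalidated.
	 *  2. The driver writes the output IV into iv[] with CPU stores,
	 *     pulling the line back into the cache and dirtying it.
	 *  3. The dirty line is later written back (eviction or unmap),
	 *     clobbering the auth_tag bytes the device just DMA'd into
	 *     memory -- hence the gcm(aes) self-test failure quoted above.
	 *
	 * The patch avoids step 2 for every mode except CBC, the only mode
	 * whose output IV the crypto API actually requires here.
	 */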