From: Ard Biesheuvel <ardb@xxxxxxxxxx>

Now that kernel mode NEON no longer disables preemption, we no longer
have to take care to disable and re-enable use of the NEON when calling
into the skcipher walk API. So just keep it enabled until done.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
---
 arch/arm64/crypto/aes-ce-ccm-glue.c | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index c4f14415f5f0..b177ebea7d09 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -182,17 +182,16 @@ static int ccm_encrypt(struct aead_request *req)
                 if (walk.nbytes == walk.total)
                         ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
 
-                kernel_neon_end();
-
                 if (walk.nbytes) {
                         err = skcipher_walk_done(&walk, tail);
-                        if (unlikely(err))
-                                return err;
-                        if (unlikely(walk.nbytes))
-                                kernel_neon_begin();
                 }
         } while (walk.nbytes);
 
+        kernel_neon_end();
+
+        if (unlikely(err))
+                return err;
+
         /* copy authtag to end of dst */
         scatterwalk_map_and_copy(mac, req->dst, req->assoclen + req->cryptlen,
                                  crypto_aead_authsize(aead), 1);
@@ -240,17 +239,16 @@ static int ccm_decrypt(struct aead_request *req)
                 if (walk.nbytes == walk.total)
                         ce_aes_ccm_final(mac, buf, ctx->key_enc, num_rounds(ctx));
 
-                kernel_neon_end();
-
                 if (walk.nbytes) {
                         err = skcipher_walk_done(&walk, tail);
-                        if (unlikely(err))
-                                return err;
-                        if (unlikely(walk.nbytes))
-                                kernel_neon_begin();
                 }
         } while (walk.nbytes);
 
+        kernel_neon_end();
+
+        if (unlikely(err))
+                return err;
+
         /* compare calculated auth tag with the stored one */
         scatterwalk_map_and_copy(buf, req->src,
                                  req->assoclen + req->cryptlen - authsize,
-- 
2.43.0.381.gb435a96ce8-goog
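
For readers skimming the hunks: after this patch, the walk loop in both
ccm_encrypt() and ccm_decrypt() has the shape condensed below. This is an
illustrative sketch distilled from the diff, not verbatim or compilable
kernel code; the walk setup and the per-block NEON processing are elided
and stand in as comments.

        /*
         * Condensed post-patch structure: the NEON unit is claimed once
         * before the walk loop, held across every skcipher_walk_done()
         * call (safe now that kernel mode NEON no longer disables
         * preemption), and released once after the loop.
         */
        kernel_neon_begin();

        do {
                /* ... process walk.nbytes - tail bytes with NEON ... */

                if (walk.nbytes == walk.total)
                        ce_aes_ccm_final(mac, buf, ctx->key_enc,
                                         num_rounds(ctx));

                if (walk.nbytes)
                        err = skcipher_walk_done(&walk, tail);
        } while (walk.nbytes);

        kernel_neon_end();

        if (unlikely(err))
                return err;

A side effect visible in the sketch is that the error from
skcipher_walk_done() is now checked only once, after the NEON section is
closed, instead of on every loop iteration.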