From: Eric Biggers <ebiggers@xxxxxxxxxx>

To improve responsiveness, disable preemption for each step of the walk
(which is at most PAGE_SIZE) rather than for the entire
encryption/decryption operation.

Signed-off-by: Eric Biggers <ebiggers@xxxxxxxxxx>
---
 arch/x86/crypto/chacha20_glue.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/arch/x86/crypto/chacha20_glue.c b/arch/x86/crypto/chacha20_glue.c
index 773d075a1483..036de144aab6 100644
--- a/arch/x86/crypto/chacha20_glue.c
+++ b/arch/x86/crypto/chacha20_glue.c
@@ -135,26 +135,24 @@ static int chacha20_simd(struct skcipher_request *req)
 	if (req->cryptlen <= CHACHA_BLOCK_SIZE || !may_use_simd())
 		return crypto_chacha_crypt(req);
 
-	err = skcipher_walk_virt(&walk, req, true);
+	err = skcipher_walk_virt(&walk, req, false);
 
 	crypto_chacha_init(state, ctx, walk.iv);
 
-	kernel_fpu_begin();
-
 	while (walk.nbytes > 0) {
 		unsigned int nbytes = walk.nbytes;
 
 		if (nbytes < walk.total)
 			nbytes = round_down(nbytes, walk.stride);
 
+		kernel_fpu_begin();
 		chacha20_dosimd(state, walk.dst.virt.addr, walk.src.virt.addr,
 				nbytes);
+		kernel_fpu_end();
 
 		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 
-	kernel_fpu_end();
-
 	return err;
 }
 
-- 
2.20.0.rc0.387.gc7a69e6b6c-goog
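
For context on the shape of the change: the preemption-disabled (FPU)
section shrinks from covering the entire request to covering a single
walk step, which is bounded by PAGE_SIZE. In simplified form (a sketch
only; process_chunk() is a placeholder, not a real kernel symbol):

	/* Before: preemption disabled for the whole request. */
	kernel_fpu_begin();
	while (walk.nbytes > 0)
		process_chunk(&walk);	/* arbitrarily long section */
	kernel_fpu_end();

	/* After: preemption disabled per walk step only. */
	while (walk.nbytes > 0) {
		kernel_fpu_begin();
		process_chunk(&walk);	/* at most PAGE_SIZE per section */
		kernel_fpu_end();
	}

This is also why the skcipher_walk_virt() atomic flag changes from true
to false: skcipher_walk_done() now runs outside the FPU section, with
preemption enabled, so the walk no longer needs to be atomic.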