Add a static inline helper modeled after crypto_cbc_encrypt_walk()
that can be reused for SIMD algorithms that need to implement a
non-SIMD fallback for performing CTR encryption.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
---
 include/crypto/ctr.h | 50 ++++++++++++++++++++
 1 file changed, 50 insertions(+)

diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h
index 4180fc080e3b..d64017fae41c 100644
--- a/include/crypto/ctr.h
+++ b/include/crypto/ctr.h
@@ -13,8 +13,58 @@
 #ifndef _CRYPTO_CTR_H
 #define _CRYPTO_CTR_H
 
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
 #define CTR_RFC3686_NONCE_SIZE 4
 #define CTR_RFC3686_IV_SIZE 8
 #define CTR_RFC3686_BLOCK_SIZE 16
 
+static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req,
+					  void (*fn)(struct crypto_skcipher *,
+						     const u8 *, u8 *))
+{
+	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+	int blocksize = crypto_skcipher_chunksize(tfm);
+	u8 buf[MAX_CIPHER_BLOCKSIZE];
+	struct skcipher_walk walk;
+	int err;
+
+	/* avoid integer division due to variable blocksize parameter */
+	if (WARN_ON_ONCE(!is_power_of_2(blocksize)))
+		return -EINVAL;
+
+	err = skcipher_walk_virt(&walk, req, false);
+
+	while (walk.nbytes > 0) {
+		u8 *dst = walk.dst.virt.addr;
+		u8 *src = walk.src.virt.addr;
+		int nbytes = walk.nbytes;
+		int tail = 0;
+
+		if (nbytes < walk.total) {
+			tail = walk.nbytes & (blocksize - 1);
+			nbytes -= tail;
+		}
+
+		do {
+			int bsize = min(nbytes, blocksize);
+
+			fn(tfm, walk.iv, buf);
+
+			crypto_xor_cpy(dst, src, buf, bsize);
+			crypto_inc(walk.iv, blocksize);
+
+			dst += bsize;
+			src += bsize;
+			nbytes -= bsize;
+		} while (nbytes > 0);
+
+		err = skcipher_walk_done(&walk, tail);
+	}
+	return err;
+}
+
 #endif /* _CRYPTO_CTR_H */
-- 
2.17.1
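
For context, a minimal usage sketch (not part of this patch) of how a SIMD
skcipher driver might wire the helper up as its non-SIMD fallback. The
"mydrv" names are hypothetical placeholders, and the sketch assumes the
driver keeps a struct crypto_aes_ctx as its tfm context and falls back to
the scalar AES library routine aes_encrypt():

#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>

/*
 * Produce one block of keystream from the counter block passed in by
 * crypto_ctr_encrypt_walk() (i.e. walk.iv).  Assumes ->setkey() stored
 * an expanded struct crypto_aes_ctx in the tfm context.
 */
static void mydrv_ctr_encrypt_one(struct crypto_skcipher *tfm,
				  const u8 *src, u8 *dst)
{
	const struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	aes_encrypt(ctx, dst, src);
}

/* Stand-in for the driver's real SIMD routine, so the sketch is self-contained. */
static int mydrv_ctr_encrypt_simd(struct skcipher_request *req)
{
	return crypto_ctr_encrypt_walk(req, mydrv_ctr_encrypt_one);
}

static int mydrv_ctr_encrypt(struct skcipher_request *req)
{
	/*
	 * When the vector unit may not be used (e.g. in softirq context),
	 * fall back to the helper, which invokes the scalar block cipher
	 * once per counter block.
	 */
	if (!crypto_simd_usable())
		return crypto_ctr_encrypt_walk(req, mydrv_ctr_encrypt_one);

	return mydrv_ctr_encrypt_simd(req);
}

The callback only has to turn one counter block into one block of
keystream; the helper itself does the XOR via crypto_xor_cpy(), increments
the counter with crypto_inc(), and truncates the final block when the
request length is not a multiple of the block size.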