[PATCH 8/8] crypto: cbc - Convert from skcipher to lskcipher

Replace the existing skcipher CBC template with an lskcipher version.

Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
---
 crypto/cbc.c | 159 +++++++++++++++++++--------------------------------
 1 file changed, 59 insertions(+), 100 deletions(-)
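
A note on the shape of the conversion (not part of the commit message):
the skcipher version walked the request's scatterlists with
skcipher_walk_virt()/skcipher_walk_done() and called the underlying
cipher's cia_encrypt/cia_decrypt on each block.  The lskcipher hooks
below instead receive linear src/dst buffers, the IV and a "final" flag
directly, feed full blocks to the child lskcipher, and return any
partial tail (rejected with -EINVAL when "final" is set).  As a rough
caller-side sketch only -- key, IV and buffer values here are made up
and error handling is abbreviated -- the resulting "cbc(aes)" instance
is driven through the same plain lskcipher API the template itself uses:

	struct crypto_lskcipher *tfm;
	u8 iv[16] = {};		/* AES-CBC IV, one block */
	u8 key[16] = {};	/* example 128-bit key */
	u8 buf[64];		/* whole number of blocks */
	int err;

	tfm = crypto_alloc_lskcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_lskcipher_setkey(tfm, key, sizeof(key));
	if (!err)
		/* src == dst, so the in-place path below is taken */
		err = crypto_lskcipher_encrypt(tfm, buf, buf, sizeof(buf), iv);

	crypto_free_lskcipher(tfm);
	return err;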

diff --git a/crypto/cbc.c b/crypto/cbc.c
index 6c03e96b945f..28345b8d921c 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -5,8 +5,6 @@
  * Copyright (c) 2006-2016 Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
  */
 
-#include <crypto/algapi.h>
-#include <crypto/internal/cipher.h>
 #include <crypto/internal/skcipher.h>
 #include <linux/err.h>
 #include <linux/init.h>
@@ -14,99 +12,71 @@
 #include <linux/log2.h>
 #include <linux/module.h>
 
-static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
-				      struct crypto_skcipher *skcipher)
+static int crypto_cbc_encrypt_segment(struct crypto_lskcipher *tfm,
+				      const u8 *src, u8 *dst, unsigned nbytes,
+				      u8 *iv)
 {
-	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	struct crypto_cipher *cipher;
-	struct crypto_tfm *tfm;
-	u8 *iv = walk->iv;
+	unsigned int bsize = crypto_lskcipher_blocksize(tfm);
 
-	cipher = skcipher_cipher_simple(skcipher);
-	tfm = crypto_cipher_tfm(cipher);
-	fn = crypto_cipher_alg(cipher)->cia_encrypt;
-
-	do {
+	for (; nbytes >= bsize; src += bsize, dst += bsize, nbytes -= bsize) {
 		crypto_xor(iv, src, bsize);
-		fn(tfm, dst, iv);
+		crypto_lskcipher_encrypt(tfm, iv, dst, bsize, NULL);
 		memcpy(iv, dst, bsize);
-
-		src += bsize;
-		dst += bsize;
-	} while ((nbytes -= bsize) >= bsize);
+	}
 
 	return nbytes;
 }
 
-static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
-				      struct crypto_skcipher *skcipher)
+static int crypto_cbc_encrypt_inplace(struct crypto_lskcipher *tfm,
+				      u8 *src, unsigned nbytes, u8 *oiv)
 {
-	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	struct crypto_cipher *cipher;
-	struct crypto_tfm *tfm;
-	u8 *iv = walk->iv;
+	unsigned int bsize = crypto_lskcipher_blocksize(tfm);
+	u8 *iv = oiv;
 
-	cipher = skcipher_cipher_simple(skcipher);
-	tfm = crypto_cipher_tfm(cipher);
-	fn = crypto_cipher_alg(cipher)->cia_encrypt;
+	if (nbytes < bsize)
+		goto out;
 
 	do {
 		crypto_xor(src, iv, bsize);
-		fn(tfm, src, src);
+		crypto_lskcipher_encrypt(tfm, src, src, bsize, NULL);
 		iv = src;
 
 		src += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
-	memcpy(walk->iv, iv, bsize);
+	memcpy(oiv, iv, bsize);
 
+out:
 	return nbytes;
 }
 
-static int crypto_cbc_encrypt(struct skcipher_request *req)
+static int crypto_cbc_encrypt(struct crypto_lskcipher *tfm, const u8 *src,
+			      u8 *dst, unsigned len, u8 *iv, bool final)
 {
-	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct skcipher_walk walk;
-	int err;
+	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+	struct crypto_lskcipher *cipher = *ctx;
+	int rem;
 
-	err = skcipher_walk_virt(&walk, req, false);
+	if (src == dst)
+		rem = crypto_cbc_encrypt_inplace(cipher, dst, len, iv);
+	else
+		rem = crypto_cbc_encrypt_segment(cipher, src, dst, len, iv);
 
-	while (walk.nbytes) {
-		if (walk.src.virt.addr == walk.dst.virt.addr)
-			err = crypto_cbc_encrypt_inplace(&walk, skcipher);
-		else
-			err = crypto_cbc_encrypt_segment(&walk, skcipher);
-		err = skcipher_walk_done(&walk, err);
-	}
-
-	return err;
+	return rem && final ? -EINVAL : rem;
 }
 
-static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
-				      struct crypto_skcipher *skcipher)
+static int crypto_cbc_decrypt_segment(struct crypto_lskcipher *tfm,
+				      const u8 *src, u8 *dst, unsigned nbytes,
+				      u8 *oiv)
 {
-	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
-	u8 *dst = walk->dst.virt.addr;
-	struct crypto_cipher *cipher;
-	struct crypto_tfm *tfm;
-	u8 *iv = walk->iv;
+	unsigned int bsize = crypto_lskcipher_blocksize(tfm);
+	const u8 *iv = oiv;
 
-	cipher = skcipher_cipher_simple(skcipher);
-	tfm = crypto_cipher_tfm(cipher);
-	fn = crypto_cipher_alg(cipher)->cia_decrypt;
+	if (nbytes < bsize)
+		goto out;
 
 	do {
-		fn(tfm, dst, src);
+		crypto_lskcipher_decrypt(tfm, src, dst, bsize, NULL);
 		crypto_xor(dst, iv, bsize);
 		iv = src;
 
@@ -114,83 +84,72 @@ static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
 		dst += bsize;
 	} while ((nbytes -= bsize) >= bsize);
 
-	memcpy(walk->iv, iv, bsize);
+	memcpy(oiv, iv, bsize);
 
+out:
 	return nbytes;
 }
 
-static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
-				      struct crypto_skcipher *skcipher)
+static int crypto_cbc_decrypt_inplace(struct crypto_lskcipher *tfm,
+				      u8 *src, unsigned nbytes, u8 *iv)
 {
-	unsigned int bsize = crypto_skcipher_blocksize(skcipher);
-	void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
-	unsigned int nbytes = walk->nbytes;
-	u8 *src = walk->src.virt.addr;
+	unsigned int bsize = crypto_lskcipher_blocksize(tfm);
 	u8 last_iv[MAX_CIPHER_BLOCKSIZE];
-	struct crypto_cipher *cipher;
-	struct crypto_tfm *tfm;
 
-	cipher = skcipher_cipher_simple(skcipher);
-	tfm = crypto_cipher_tfm(cipher);
-	fn = crypto_cipher_alg(cipher)->cia_decrypt;
+	if (nbytes < bsize)
+		goto out;
 
 	/* Start of the last block. */
 	src += nbytes - (nbytes & (bsize - 1)) - bsize;
 	memcpy(last_iv, src, bsize);
 
 	for (;;) {
-		fn(tfm, src, src);
+		crypto_lskcipher_decrypt(tfm, src, src, bsize, NULL);
 		if ((nbytes -= bsize) < bsize)
 			break;
 		crypto_xor(src, src - bsize, bsize);
 		src -= bsize;
 	}
 
-	crypto_xor(src, walk->iv, bsize);
-	memcpy(walk->iv, last_iv, bsize);
+	crypto_xor(src, iv, bsize);
+	memcpy(iv, last_iv, bsize);
 
+out:
 	return nbytes;
 }
 
-static int crypto_cbc_decrypt(struct skcipher_request *req)
+static int crypto_cbc_decrypt(struct crypto_lskcipher *tfm, const u8 *src,
+			      u8 *dst, unsigned len, u8 *iv, bool final)
 {
-	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-	struct skcipher_walk walk;
-	int err;
+	struct crypto_lskcipher **ctx = crypto_lskcipher_ctx(tfm);
+	struct crypto_lskcipher *cipher = *ctx;
+	int rem;
 
-	err = skcipher_walk_virt(&walk, req, false);
+	if (src == dst)
+		rem = crypto_cbc_decrypt_inplace(cipher, dst, len, iv);
+	else
+		rem = crypto_cbc_decrypt_segment(cipher, src, dst, len, iv);
 
-	while (walk.nbytes) {
-		if (walk.src.virt.addr == walk.dst.virt.addr)
-			err = crypto_cbc_decrypt_inplace(&walk, skcipher);
-		else
-			err = crypto_cbc_decrypt_segment(&walk, skcipher);
-		err = skcipher_walk_done(&walk, err);
-	}
-
-	return err;
+	return rem && final ? -EINVAL : rem;
 }
 
 static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
 {
-	struct skcipher_instance *inst;
-	struct crypto_alg *alg;
+	struct lskcipher_instance *inst;
 	int err;
 
-	inst = skcipher_alloc_instance_simple(tmpl, tb);
+	inst = lskcipher_alloc_instance_simple(tmpl, tb);
 	if (IS_ERR(inst))
 		return PTR_ERR(inst);
 
-	alg = skcipher_ialg_simple(inst);
-
 	err = -EINVAL;
-	if (!is_power_of_2(alg->cra_blocksize))
+	if (!is_power_of_2(inst->alg.co.base.cra_blocksize))
 		goto out_free_inst;
 
 	inst->alg.encrypt = crypto_cbc_encrypt;
 	inst->alg.decrypt = crypto_cbc_decrypt;
 
-	err = skcipher_register_instance(tmpl, inst);
+	err = lskcipher_register_instance(tmpl, inst);
 	if (err) {
 out_free_inst:
 		inst->free(inst);
-- 
Email: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt