[PATCH 27/30] crypto: lrw - remove lrw_crypt()

From: Eric Biggers <ebiggers@xxxxxxxxxx>

Now that all users of lrw_crypt() have been removed in favor of the LRW
template wrapping an ECB mode algorithm, remove lrw_crypt().  Also
remove crypto/lrw.h, since it is no longer needed; fold
'struct lrw_table_ctx' into 'struct priv', lrw_init_table() into
setkey(), and lrw_free_table() into exit_tfm().

Signed-off-by: Eric Biggers <ebiggers@xxxxxxxxxx>
---
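
Note (not part of the commit): with lrw_crypt() gone, LRW is reached through
the regular skcipher API by requesting the template by name; asking for
"lrw(aes)" instantiates the LRW template over an ECB implementation of the
cipher.  A minimal sketch follows -- the function name is made up for
illustration, it assumes AES, a synchronous implementation, and skips
skcipher_request_alloc() failure handling:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

/* Encrypt @nbytes from @src to @dst with LRW.  @key is the underlying
 * cipher key followed by the 16-byte tweak key, as expected by the
 * template's setkey(). */
static int lrw_encrypt_example(const u8 *key, unsigned int keylen,
			       struct scatterlist *src,
			       struct scatterlist *dst,
			       unsigned int nbytes, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err;

	/* the LRW template wrapping an ECB mode algorithm, e.g. ecb(aes) */
	tfm = crypto_alloc_skcipher("lrw(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
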
 arch/x86/crypto/glue_helper.c |   1 -
 crypto/lrw.c                  | 152 +++++++++++-------------------------------
 include/crypto/lrw.h          |  44 ------------
 3 files changed, 39 insertions(+), 158 deletions(-)
 delete mode 100644 include/crypto/lrw.h

diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c
index 5b909790bf8af..cd5e7cebdb9f2 100644
--- a/arch/x86/crypto/glue_helper.c
+++ b/arch/x86/crypto/glue_helper.c
@@ -29,7 +29,6 @@
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
 #include <crypto/internal/skcipher.h>
-#include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <asm/crypto/glue_helper.h>
 
diff --git a/crypto/lrw.c b/crypto/lrw.c
index cbbd7c50ad19b..a09cdaa6ddf3b 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -28,13 +28,31 @@
 
 #include <crypto/b128ops.h>
 #include <crypto/gf128mul.h>
-#include <crypto/lrw.h>
 
 #define LRW_BUFFER_SIZE 128u
 
+#define LRW_BLOCK_SIZE 16
+
 struct priv {
 	struct crypto_skcipher *child;
-	struct lrw_table_ctx table;
+
+	/*
+	 * optimizes multiplying a random (non incrementing, as at the
+	 * start of a new sector) value with key2, we could also have
+	 * used 4k optimization tables or no optimization at all. In the
+	 * latter case we would have to store key2 here
+	 */
+	struct gf128mul_64k *table;
+
+	/*
+	 * stores:
+	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
+	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
+	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
+	 * needed for optimized multiplication of incrementing values
+	 * with key2
+	 */
+	be128 mulinc[128];
 };
 
 struct rctx {
@@ -65,11 +83,25 @@ static inline void setbit128_bbe(void *b, int bit)
 			), b);
 }
 
-int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
+static int setkey(struct crypto_skcipher *parent, const u8 *key,
+		  unsigned int keylen)
 {
+	struct priv *ctx = crypto_skcipher_ctx(parent);
+	struct crypto_skcipher *child = ctx->child;
+	int err, bsize = LRW_BLOCK_SIZE;
+	const u8 *tweak = key + keylen - bsize;
 	be128 tmp = { 0 };
 	int i;
 
+	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+					 CRYPTO_TFM_REQ_MASK);
+	err = crypto_skcipher_setkey(child, key, keylen - bsize);
+	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+					  CRYPTO_TFM_RES_MASK);
+	if (err)
+		return err;
+
 	if (ctx->table)
 		gf128mul_free_64k(ctx->table);
 
@@ -87,34 +119,6 @@ int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak)
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(lrw_init_table);
-
-void lrw_free_table(struct lrw_table_ctx *ctx)
-{
-	if (ctx->table)
-		gf128mul_free_64k(ctx->table);
-}
-EXPORT_SYMBOL_GPL(lrw_free_table);
-
-static int setkey(struct crypto_skcipher *parent, const u8 *key,
-		  unsigned int keylen)
-{
-	struct priv *ctx = crypto_skcipher_ctx(parent);
-	struct crypto_skcipher *child = ctx->child;
-	int err, bsize = LRW_BLOCK_SIZE;
-	const u8 *tweak = key + keylen - bsize;
-
-	crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
-	crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
-					 CRYPTO_TFM_REQ_MASK);
-	err = crypto_skcipher_setkey(child, key, keylen - bsize);
-	crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
-					  CRYPTO_TFM_RES_MASK);
-	if (err)
-		return err;
-
-	return lrw_init_table(&ctx->table, tweak);
-}
 
 static inline void inc(be128 *iv)
 {
@@ -238,7 +242,7 @@ static int pre_crypt(struct skcipher_request *req)
 			/* T <- I*Key2, using the optimization
 			 * discussed in the specification */
 			be128_xor(&rctx->t, &rctx->t,
-				  &ctx->table.mulinc[get_index128(iv)]);
+				  &ctx->mulinc[get_index128(iv)]);
 			inc(iv);
 		} while ((avail -= bs) >= bs);
 
@@ -301,7 +305,7 @@ static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
 	memcpy(&rctx->t, req->iv, sizeof(rctx->t));
 
 	/* T <- I*Key2 */
-	gf128mul_64k_bbe(&rctx->t, ctx->table.table);
+	gf128mul_64k_bbe(&rctx->t, ctx->table);
 
 	return 0;
 }
@@ -416,85 +420,6 @@ static int decrypt(struct skcipher_request *req)
 	return do_decrypt(req, init_crypt(req, decrypt_done));
 }
 
-int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
-	      struct scatterlist *ssrc, unsigned int nbytes,
-	      struct lrw_crypt_req *req)
-{
-	const unsigned int bsize = LRW_BLOCK_SIZE;
-	const unsigned int max_blks = req->tbuflen / bsize;
-	struct lrw_table_ctx *ctx = req->table_ctx;
-	struct blkcipher_walk walk;
-	unsigned int nblocks;
-	be128 *iv, *src, *dst, *t;
-	be128 *t_buf = req->tbuf;
-	int err, i;
-
-	BUG_ON(max_blks < 1);
-
-	blkcipher_walk_init(&walk, sdst, ssrc, nbytes);
-
-	err = blkcipher_walk_virt(desc, &walk);
-	nbytes = walk.nbytes;
-	if (!nbytes)
-		return err;
-
-	nblocks = min(walk.nbytes / bsize, max_blks);
-	src = (be128 *)walk.src.virt.addr;
-	dst = (be128 *)walk.dst.virt.addr;
-
-	/* calculate first value of T */
-	iv = (be128 *)walk.iv;
-	t_buf[0] = *iv;
-
-	/* T <- I*Key2 */
-	gf128mul_64k_bbe(&t_buf[0], ctx->table);
-
-	i = 0;
-	goto first;
-
-	for (;;) {
-		do {
-			for (i = 0; i < nblocks; i++) {
-				/* T <- I*Key2, using the optimization
-				 * discussed in the specification */
-				be128_xor(&t_buf[i], t,
-						&ctx->mulinc[get_index128(iv)]);
-				inc(iv);
-first:
-				t = &t_buf[i];
-
-				/* PP <- T xor P */
-				be128_xor(dst + i, t, src + i);
-			}
-
-			/* CC <- E(Key2,PP) */
-			req->crypt_fn(req->crypt_ctx, (u8 *)dst,
-				      nblocks * bsize);
-
-			/* C <- T xor CC */
-			for (i = 0; i < nblocks; i++)
-				be128_xor(dst + i, dst + i, &t_buf[i]);
-
-			src += nblocks;
-			dst += nblocks;
-			nbytes -= nblocks * bsize;
-			nblocks = min(nbytes / bsize, max_blks);
-		} while (nblocks > 0);
-
-		err = blkcipher_walk_done(desc, &walk, nbytes);
-		nbytes = walk.nbytes;
-		if (!nbytes)
-			break;
-
-		nblocks = min(nbytes / bsize, max_blks);
-		src = (be128 *)walk.src.virt.addr;
-		dst = (be128 *)walk.dst.virt.addr;
-	}
-
-	return err;
-}
-EXPORT_SYMBOL_GPL(lrw_crypt);
-
 static int init_tfm(struct crypto_skcipher *tfm)
 {
 	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
@@ -518,7 +443,8 @@ static void exit_tfm(struct crypto_skcipher *tfm)
 {
 	struct priv *ctx = crypto_skcipher_ctx(tfm);
 
-	lrw_free_table(&ctx->table);
+	if (ctx->table)
+		gf128mul_free_64k(ctx->table);
 	crypto_free_skcipher(ctx->child);
 }
 
diff --git a/include/crypto/lrw.h b/include/crypto/lrw.h
deleted file mode 100644
index a9d44c06d0812..0000000000000
--- a/include/crypto/lrw.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CRYPTO_LRW_H
-#define _CRYPTO_LRW_H
-
-#include <crypto/b128ops.h>
-
-struct scatterlist;
-struct gf128mul_64k;
-struct blkcipher_desc;
-
-#define LRW_BLOCK_SIZE 16
-
-struct lrw_table_ctx {
-	/* optimizes multiplying a random (non incrementing, as at the
-	 * start of a new sector) value with key2, we could also have
-	 * used 4k optimization tables or no optimization at all. In the
-	 * latter case we would have to store key2 here */
-	struct gf128mul_64k *table;
-	/* stores:
-	 *  key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 },
-	 *  key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 }
-	 *  key2*{ 0,0,...1,1,1,1,1 }, etc
-	 * needed for optimized multiplication of incrementing values
-	 * with key2 */
-	be128 mulinc[128];
-};
-
-int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak);
-void lrw_free_table(struct lrw_table_ctx *ctx);
-
-struct lrw_crypt_req {
-	be128 *tbuf;
-	unsigned int tbuflen;
-
-	struct lrw_table_ctx *table_ctx;
-	void *crypt_ctx;
-	void (*crypt_fn)(void *ctx, u8 *blks, unsigned int nbytes);
-};
-
-int lrw_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-	      struct scatterlist *src, unsigned int nbytes,
-	      struct lrw_crypt_req *req);
-
-#endif  /* _CRYPTO_LRW_H */
-- 
2.16.2



