[PATCH v2 6/6] crypto: lib/sha - Combine round constants and message schedule

Putting the round constants and the message schedule arrays together in
one structure saves one register, which can be a significant benefit on
register-constrained architectures. On x86-32 (tested on a Broadwell
Xeon), this gives a 10% performance improvement.
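
The saving comes from address generation: once the constants and the
schedule share one struct, both arrays are reachable from a single base
pointer. A rough sketch of the layout (illustrative only, not the
generated code):

	struct KW {
		u32 K[64];	/* round constants, copied in before the first block */
		u32 W[64];	/* message schedule */
	};

	/*
	 * KW->K[i] is *(base + 4*i) and KW->W[i] is *(base + 256 + 4*i),
	 * so each round needs only one base register live instead of
	 * separate pointers to SHA256_K and W.
	 */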

Signed-off-by: Arvind Sankar <nivedita@xxxxxxxxxxxx>
Suggested-by: David Laight <David.Laight@xxxxxxxxxx>
---
 lib/crypto/sha256.c | 49 ++++++++++++++++++++++++++-------------------
 1 file changed, 28 insertions(+), 21 deletions(-)

diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c
index 3a8802d5f747..985cd0560d79 100644
--- a/lib/crypto/sha256.c
+++ b/lib/crypto/sha256.c
@@ -29,6 +29,11 @@ static const u32 SHA256_K[] = {
 	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
 };
 
+struct KW {
+	u32 K[64];
+	u32 W[64];
+};
+
 static inline u32 Ch(u32 x, u32 y, u32 z)
 {
 	return z ^ (x & (y ^ z));
@@ -56,39 +61,39 @@ static inline void BLEND_OP(int I, u32 *W)
 
 #define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do {		\
 	u32 t1, t2;						\
-	t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i];	\
+	t1 = h + e1(e) + Ch(e, f, g) + KW->K[i] + KW->W[i];	\
 	t2 = e0(a) + Maj(a, b, c);				\
 	d += t1;						\
 	h = t1 + t2;						\
 } while (0)
 
-static void sha256_transform(u32 *state, const u8 *input, u32 *W)
+static void sha256_transform(u32 *state, const u8 *input, struct KW *KW)
 {
 	u32 a, b, c, d, e, f, g, h;
 	int i;
 
 	/* load the input */
 	for (i = 0; i < 16; i += 8) {
-		LOAD_OP(i + 0, W, input);
-		LOAD_OP(i + 1, W, input);
-		LOAD_OP(i + 2, W, input);
-		LOAD_OP(i + 3, W, input);
-		LOAD_OP(i + 4, W, input);
-		LOAD_OP(i + 5, W, input);
-		LOAD_OP(i + 6, W, input);
-		LOAD_OP(i + 7, W, input);
+		LOAD_OP(i + 0, KW->W, input);
+		LOAD_OP(i + 1, KW->W, input);
+		LOAD_OP(i + 2, KW->W, input);
+		LOAD_OP(i + 3, KW->W, input);
+		LOAD_OP(i + 4, KW->W, input);
+		LOAD_OP(i + 5, KW->W, input);
+		LOAD_OP(i + 6, KW->W, input);
+		LOAD_OP(i + 7, KW->W, input);
 	}
 
 	/* now blend */
 	for (i = 16; i < 64; i += 8) {
-		BLEND_OP(i + 0, W);
-		BLEND_OP(i + 1, W);
-		BLEND_OP(i + 2, W);
-		BLEND_OP(i + 3, W);
-		BLEND_OP(i + 4, W);
-		BLEND_OP(i + 5, W);
-		BLEND_OP(i + 6, W);
-		BLEND_OP(i + 7, W);
+		BLEND_OP(i + 0, KW->W);
+		BLEND_OP(i + 1, KW->W);
+		BLEND_OP(i + 2, KW->W);
+		BLEND_OP(i + 3, KW->W);
+		BLEND_OP(i + 4, KW->W);
+		BLEND_OP(i + 5, KW->W);
+		BLEND_OP(i + 6, KW->W);
+		BLEND_OP(i + 7, KW->W);
 	}
 
 	/* load the state into our registers */
@@ -115,7 +120,7 @@ void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
 {
 	unsigned int partial, done;
 	const u8 *src;
-	u32 W[64];
+	struct KW KW;
 
 	partial = sctx->count & 0x3f;
 	sctx->count += len;
@@ -129,13 +134,15 @@ void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
 			src = sctx->buf;
 		}
 
+		memcpy(KW.K, SHA256_K, sizeof(KW.K));
+
 		do {
-			sha256_transform(sctx->state, src, W);
+			sha256_transform(sctx->state, src, &KW);
 			done += 64;
 			src = data + done;
 		} while (done + 63 < len);
 
-		memzero_explicit(W, sizeof(W));
+		memzero_explicit(KW.W, sizeof(KW.W));
 
 		partial = 0;
 	}
-- 
2.26.2



