Patch "crypto: nx - Fixing SHA update bug" has been added to the 4.1-stable tree

This is a note to let you know that I've just added the patch titled

    crypto: nx - Fixing SHA update bug

to the 4.1-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     crypto-nx-fixing-sha-update-bug.patch
and it can be found in the queue-4.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


From 10d87b730e1d9f1442cae6487bb3aef8632bed23 Mon Sep 17 00:00:00 2001
From: Leonidas Da Silva Barbosa <leosilva@xxxxxxxxxxxxxxxxxx>
Date: Thu, 23 Apr 2015 17:41:43 -0300
Subject: crypto: nx - Fixing SHA update bug

From: Leonidas Da Silva Barbosa <leosilva@xxxxxxxxxxxxxxxxxx>

commit 10d87b730e1d9f1442cae6487bb3aef8632bed23 upstream.

The bug happens when a data size smaller than the SHA block size is
passed. Since the first chunk is saved in the buffer, the second round
goes through a two-step calculation of op.inlen and op.outlen, and that
is where the issue resides: wrong values of op.inlen and op.outlen were
being calculated.

This patch fixes it by eliminating nx_sha_build_sg_list, which is
useless in the SHA algorithms' context. Instead we call nx_build_sg_list
directly and pass a previously calculated max_sg_len to it.

Signed-off-by: Leonidas S. Barbosa <leosilva@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Herbert Xu <herbert@xxxxxxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

---
 drivers/crypto/nx/nx-sha256.c |   84 ++++++++++++++++++++++++-----------------
 drivers/crypto/nx/nx-sha512.c |   85 ++++++++++++++++++++++++------------------
 drivers/crypto/nx/nx.c        |   47 -----------------------
 drivers/crypto/nx/nx.h        |    2 
 4 files changed, 99 insertions(+), 119 deletions(-)

--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -33,8 +33,9 @@ static int nx_sha256_init(struct shash_d
 {
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_sg *out_sg;
 	int len;
-	int rc;
+	u32 max_sg_len;
 
 	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
@@ -44,15 +45,18 @@ static int nx_sha256_init(struct shash_d
 
 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
 
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
 	len = SHA256_DIGEST_SIZE;
-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-				  &nx_ctx->op.outlen,
-				  &len,
-				  (u8 *) sctx->state,
-				  NX_DS_SHA256);
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+				  &len, max_sg_len);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
-	if (rc)
-		goto out;
+	if (len != SHA256_DIGEST_SIZE)
+		return -EINVAL;
 
 	sctx->state[0] = __cpu_to_be32(SHA256_H0);
 	sctx->state[1] = __cpu_to_be32(SHA256_H1);
@@ -64,7 +68,6 @@ static int nx_sha256_init(struct shash_d
 	sctx->state[7] = __cpu_to_be32(SHA256_H7);
 	sctx->count = 0;
 
-out:
 	return 0;
 }
 
@@ -74,10 +77,12 @@ static int nx_sha256_update(struct shash
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+	struct nx_sg *in_sg;
 	u64 to_process = 0, leftover, total;
 	unsigned long irq_flags;
 	int rc = 0;
 	int data_len;
+	u32 max_sg_len;
 	u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -97,6 +102,12 @@ static int nx_sha256_update(struct shash
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
+	in_sg = nx_ctx->in_sg;
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
 	do {
 		/*
 		 * to_process: the SHA256_BLOCK_SIZE data chunk to process in
@@ -108,25 +119,22 @@ static int nx_sha256_update(struct shash
 
 		if (buf_len) {
 			data_len = buf_len;
-			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-						  &nx_ctx->op.inlen,
-						  &data_len,
-						  (u8 *) sctx->buf,
-						  NX_DS_SHA256);
+			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+						 (u8 *) sctx->buf,
+						 &data_len,
+						 max_sg_len);
 
-			if (rc || data_len != buf_len)
+			if (data_len != buf_len) {
+				rc = -EINVAL;
 				goto out;
+			}
 		}
 
 		data_len = to_process - buf_len;
-		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-					  &nx_ctx->op.inlen,
-					  &data_len,
-					  (u8 *) data,
-					  NX_DS_SHA256);
+		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+					 &data_len, max_sg_len);
 
-		if (rc)
-			goto out;
+		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
 		to_process = (data_len + buf_len);
 		leftover = total - to_process;
@@ -173,12 +181,19 @@ static int nx_sha256_final(struct shash_
 	struct sha256_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+	struct nx_sg *in_sg, *out_sg;
 	unsigned long irq_flags;
-	int rc;
+	u32 max_sg_len;
+	int rc = 0;
 	int len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
 	/* final is represented by continuing the operation and indicating that
 	 * this is not an intermediate operation */
 	if (sctx->count >= SHA256_BLOCK_SIZE) {
@@ -195,25 +210,24 @@ static int nx_sha256_final(struct shash_
 	csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
 
 	len = sctx->count & (SHA256_BLOCK_SIZE - 1);
-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-				  &nx_ctx->op.inlen,
-				  &len,
-				  (u8 *) sctx->buf,
-				  NX_DS_SHA256);
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
+				 &len, max_sg_len);
 
-	if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
+	if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
+		rc = -EINVAL;
 		goto out;
+	}
 
 	len = SHA256_DIGEST_SIZE;
-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-				  &nx_ctx->op.outlen,
-				  &len,
-				  out,
-				  NX_DS_SHA256);
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
 
-	if (rc || len != SHA256_DIGEST_SIZE)
+	if (len != SHA256_DIGEST_SIZE) {
+		rc = -EINVAL;
 		goto out;
+	}
 
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 	if (!nx_ctx->op.outlen) {
 		rc = -EINVAL;
 		goto out;
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -32,8 +32,9 @@ static int nx_sha512_init(struct shash_d
 {
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+	struct nx_sg *out_sg;
 	int len;
-	int rc;
+	u32 max_sg_len;
 
 	nx_ctx_init(nx_ctx, HCOP_FC_SHA);
 
@@ -43,15 +44,18 @@ static int nx_sha512_init(struct shash_d
 
 	NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
 
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
 	len = SHA512_DIGEST_SIZE;
-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-				  &nx_ctx->op.outlen,
-				  &len,
-				  (u8 *)sctx->state,
-				  NX_DS_SHA512);
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+				  &len, max_sg_len);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
-	if (rc || len != SHA512_DIGEST_SIZE)
-		goto out;
+	if (len != SHA512_DIGEST_SIZE)
+		return -EINVAL;
 
 	sctx->state[0] = __cpu_to_be64(SHA512_H0);
 	sctx->state[1] = __cpu_to_be64(SHA512_H1);
@@ -63,7 +67,6 @@ static int nx_sha512_init(struct shash_d
 	sctx->state[7] = __cpu_to_be64(SHA512_H7);
 	sctx->count[0] = 0;
 
-out:
 	return 0;
 }
 
@@ -73,10 +76,12 @@ static int nx_sha512_update(struct shash
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+	struct nx_sg *in_sg;
 	u64 to_process, leftover = 0, total;
 	unsigned long irq_flags;
 	int rc = 0;
 	int data_len;
+	u32 max_sg_len;
 	u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
@@ -96,6 +101,12 @@ static int nx_sha512_update(struct shash
 	NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
 	NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
 
+	in_sg = nx_ctx->in_sg;
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
 	do {
 		/*
 		 * to_process: the SHA512_BLOCK_SIZE data chunk to process in
@@ -108,25 +119,26 @@ static int nx_sha512_update(struct shash
 
 		if (buf_len) {
 			data_len = buf_len;
-			rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-						  &nx_ctx->op.inlen,
-						  &data_len,
-						  (u8 *) sctx->buf,
-						  NX_DS_SHA512);
+			in_sg = nx_build_sg_list(nx_ctx->in_sg,
+						 (u8 *) sctx->buf,
+						 &data_len, max_sg_len);
 
-			if (rc || data_len != buf_len)
+			if (data_len != buf_len) {
+				rc = -EINVAL;
 				goto out;
+			}
 		}
 
 		data_len = to_process - buf_len;
-		rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-					  &nx_ctx->op.inlen,
-					  &data_len,
-					  (u8 *) data,
-					  NX_DS_SHA512);
+		in_sg = nx_build_sg_list(in_sg, (u8 *) data,
+					 &data_len, max_sg_len);
+
+		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
 
-		if (rc || data_len != (to_process - buf_len))
+		if (data_len != (to_process - buf_len)) {
+			rc = -EINVAL;
 			goto out;
+		}
 
 		to_process = (data_len + buf_len);
 		leftover = total - to_process;
@@ -172,13 +184,20 @@ static int nx_sha512_final(struct shash_
 	struct sha512_state *sctx = shash_desc_ctx(desc);
 	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
 	struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+	struct nx_sg *in_sg, *out_sg;
+	u32 max_sg_len;
 	u64 count0;
 	unsigned long irq_flags;
-	int rc;
+	int rc = 0;
 	int len;
 
 	spin_lock_irqsave(&nx_ctx->lock, irq_flags);
 
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
 	/* final is represented by continuing the operation and indicating that
 	 * this is not an intermediate operation */
 	if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
@@ -200,24 +219,20 @@ static int nx_sha512_final(struct shash_
 	csbcpb->cpb.sha512.message_bit_length_lo = count0;
 
 	len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
-				  &nx_ctx->op.inlen,
-				  &len,
-				  (u8 *)sctx->buf,
-				  NX_DS_SHA512);
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
+				 max_sg_len);
 
-	if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
+	if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
+		rc = -EINVAL;
 		goto out;
+	}
 
 	len = SHA512_DIGEST_SIZE;
-	rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
-				  &nx_ctx->op.outlen,
-				  &len,
-				  out,
-				  NX_DS_SHA512);
+	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+				 max_sg_len);
 
-	if (rc)
-		goto out;
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
 
 	if (!nx_ctx->op.outlen) {
 		rc = -EINVAL;
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -252,53 +252,6 @@ static long int trim_sg_list(struct nx_s
 }
 
 /**
- * nx_sha_build_sg_list - walk and build sg list to sha modes
- *			  using right bounds and limits.
- * @nx_ctx: NX crypto context for the lists we're building
- * @nx_sg: current sg list in or out list
- * @op_len: current op_len to be used in order to build a sg list
- * @nbytes:  number or bytes to be processed
- * @offset: buf offset
- * @mode: SHA256 or SHA512
- */
-int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
-			  struct nx_sg 	      *nx_in_outsg,
-			  s64		      *op_len,
-			  unsigned int        *nbytes,
-			  u8 		      *offset,
-			  u32		      mode)
-{
-	unsigned int delta = 0;
-	unsigned int total = *nbytes;
-	struct nx_sg *nx_insg = nx_in_outsg;
-	unsigned int max_sg_len;
-
-	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
-			nx_driver.of.max_sg_len/sizeof(struct nx_sg));
-	max_sg_len = min_t(u64, max_sg_len,
-			nx_ctx->ap->databytelen/NX_PAGE_SIZE);
-
-	*nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
-	nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
-
-	switch (mode) {
-	case NX_DS_SHA256:
-		if (*nbytes < total)
-			delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
-		break;
-	case NX_DS_SHA512:
-		if (*nbytes < total)
-			delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
-		break;
-	default:
-		return -EINVAL;
-	}
-	*op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
-
-	return 0;
-}
-
-/**
  * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
  *                     scatterlists based on them.
  *
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -153,8 +153,6 @@ void nx_crypto_ctx_exit(struct crypto_tf
 void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
 int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
 		  u32 may_sleep);
-int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
-			 s64 *, unsigned int *, u8 *, u32);
 struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
 int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
 		      struct scatterlist *, struct scatterlist *, unsigned int *,
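
For reference, the new calling pattern that both nx-sha256.c and
nx-sha512.c adopt above boils down to: compute max_sg_len once from the
device limits, call the sg-list builder directly, verify that the
requested length actually fit, and derive op.inlen/op.outlen from how
far the returned cursor advanced. Below is a small stand-alone sketch of
that pattern with simplified stand-in types and limits; it is not the
driver's real nx_build_sg_list() or struct nx_sg.

/*
 * Stand-alone sketch of the pattern the patch switches to (simplified
 * stand-in types and limits; the real driver uses struct nx_sg,
 * nx_build_sg_list() and the nx_ctx->ap fields shown in the hunks above).
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define MIN(a, b)	((a) < (b) ? (a) : (b))

struct sg_entry {
	uint64_t addr;
	uint32_t len;
};

/*
 * Mimics the builder's contract: map up to *len bytes at buf into sg
 * entries, clamp *len to what actually fit, and return a cursor one past
 * the last entry used.
 */
static struct sg_entry *build_sg_list(struct sg_entry *sg, uint8_t *buf,
				      unsigned int *len, uint32_t max_sg_len)
{
	unsigned int left = *len, done = 0;
	uint32_t used = 0;

	while (left && used < max_sg_len) {
		uint32_t chunk = MIN(left, PAGE_SIZE);

		sg->addr = (uintptr_t)(buf + done);
		sg->len = chunk;
		sg++;
		used++;
		done += chunk;
		left -= chunk;
	}
	*len = done;			/* how much was actually mapped */
	return sg;			/* one past the last entry used */
}

int main(void)
{
	struct sg_entry in_sg_list[32], *in_sg;
	static uint8_t data[5000];
	unsigned int data_len = sizeof(data);
	/* made-up device limits standing in for nx_ctx->ap and nx_driver.of */
	uint32_t sglen = 32, hw_sg_bytes = 1024, databytelen = 1 << 20;
	uint32_t max_sg_len;
	long inlen;

	/* computed once per operation, as in the patched init/update/final */
	max_sg_len = MIN(sglen, hw_sg_bytes / (uint32_t)sizeof(struct sg_entry));
	max_sg_len = MIN(max_sg_len, databytelen / PAGE_SIZE);

	in_sg = build_sg_list(in_sg_list, data, &data_len, max_sg_len);
	if (data_len != sizeof(data))
		return 1;		/* the driver bails out with -EINVAL */

	/* op.inlen/op.outlen are derived from how far the cursor advanced */
	inlen = (in_sg - in_sg_list) * sizeof(struct sg_entry);
	printf("mapped %u data bytes using %ld bytes of sg list\n",
	       data_len, inlen);
	return 0;
}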


Patches currently in stable-queue which might be from leosilva@xxxxxxxxxxxxxxxxxx are

queue-4.1/crypto-nx-fixing-nx-data-alignment-with-nx_sg-list.patch
queue-4.1/crypto-nx-fixing-sha-update-bug.patch
--
To unsubscribe from this list: send the line "unsubscribe stable" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html


