[PATCH] crypto: x86: Do not acquire fpu context for too long

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



Many drivers, such as camellia, cast5, etc., use the ECB macro.
The ECB macro internally calls kernel_fpu_begin(), then calls encrypt()
and decrypt(), and then calls kernel_fpu_end().
If too many blocks are passed, the FPU context is held for too long.

kernel_fpu_{begin,end}() internally call preempt_{disable,enable}().
So an RCU stall can occur if the FPU context (a preemption-disabled
region) is held for too long.

The purpose of this patch is to pass no more than 4096 bytes to
encrypt() and decrypt() within a single FPU context.

Fixes: 827ee47228a6 ("crypto: x86 - add some helper macros for ECB and CBC modes")
Suggested-by: Elliott, Robert (Servers) <elliott@xxxxxxx>
Signed-off-by: Taehee Yoo <ap420073@xxxxxxxxx>
---
 arch/x86/crypto/ecb_cbc_helpers.h | 34 ++++++++++++++++++++++++-------
 1 file changed, 27 insertions(+), 7 deletions(-)

diff --git a/arch/x86/crypto/ecb_cbc_helpers.h b/arch/x86/crypto/ecb_cbc_helpers.h
index eaa15c7b29d6..551d8bdfd037 100644
--- a/arch/x86/crypto/ecb_cbc_helpers.h
+++ b/arch/x86/crypto/ecb_cbc_helpers.h
@@ -11,19 +11,35 @@
  * having to rely on indirect calls and retpolines.
  */
 
+#define ECB_CBC_WALK_MAX	4096
+
 #define ECB_WALK_START(req, bsize, fpu_blocks) do {			\
 	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));	\
+	unsigned int walked_bytes = 0;					\
 	const int __bsize = (bsize);					\
 	struct skcipher_walk walk;					\
-	int err = skcipher_walk_virt(&walk, (req), false);		\
+	int err;							\
+									\
+	err = skcipher_walk_virt(&walk, (req), false);			\
 	while (walk.nbytes > 0) {					\
-		unsigned int nbytes = walk.nbytes;			\
-		bool do_fpu = (fpu_blocks) != -1 &&			\
-			      nbytes >= (fpu_blocks) * __bsize;		\
 		const u8 *src = walk.src.virt.addr;			\
-		u8 *dst = walk.dst.virt.addr;				\
 		u8 __maybe_unused buf[(bsize)];				\
-		if (do_fpu) kernel_fpu_begin()
+		u8 *dst = walk.dst.virt.addr;				\
+		unsigned int nbytes;					\
+		bool do_fpu;						\
+									\
+		if (walk.nbytes - walked_bytes > ECB_CBC_WALK_MAX) {	\
+			nbytes = ECB_CBC_WALK_MAX;			\
+			walked_bytes += ECB_CBC_WALK_MAX;		\
+		} else {						\
+			nbytes = walk.nbytes - walked_bytes;		\
+			walked_bytes = walk.nbytes;			\
+		}							\
+									\
+		do_fpu = (fpu_blocks) != -1 &&				\
+			 nbytes >= (fpu_blocks) * __bsize;		\
+		if (do_fpu)						\
+			kernel_fpu_begin()
 
 #define CBC_WALK_START(req, bsize, fpu_blocks)				\
 	ECB_WALK_START(req, bsize, fpu_blocks)
@@ -65,8 +81,12 @@
 } while (0)
 
 #define ECB_WALK_END()							\
-		if (do_fpu) kernel_fpu_end();				\
+		if (do_fpu)						\
+			kernel_fpu_end();				\
+		if (walked_bytes < walk.nbytes)				\
+			continue;					\
 		err = skcipher_walk_done(&walk, nbytes);		\
+		walked_bytes = 0;					\
 	}								\
 	return err;							\
 } while (0)
-- 
2.17.1




[Index of Archives]     [Kernel]     [Gnu Classpath]     [Gnu Crypto]     [DM Crypt]     [Netfilter]     [Bugtraq]
  Powered by Linux