Patch "random: unify batched entropy implementations" has been added to the 5.18-stable tree

This is a note to let you know that I've just added the patch titled

    random: unify batched entropy implementations

to the 5.18-stable tree, which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     random-unify-batched-entropy-implementations.patch
and it can be found in the queue-5.18 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From foo@baz Thu May 26 04:17:01 PM CEST 2022
From: "Jason A. Donenfeld" <Jason@xxxxxxxxx>
Date: Sun, 15 May 2022 00:22:05 +0200
Subject: random: unify batched entropy implementations

From: "Jason A. Donenfeld" <Jason@xxxxxxxxx>

commit 3092adcef3ffd2ef59634998297ca8358461ebce upstream.

There are currently two separate batched entropy implementations, for
u32 and u64, with nearly identical code. Their goal is to avoid
unaligned memory accesses and to let the buffers be used more
efficiently. Maintaining these two functions independently is a bit of
a hassle, though, since they always need to be kept in sync.

This commit factors them out into a type-generic macro whose expansion
produces the same code as before, so that diffing the generated
assembly shows no differences. This will also make it easier in the
future to add u16 and u8 batches.
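
As an illustration of that flexibility (hypothetical here, not part of
this patch), a u16 or u8 batch would then be a single macro invocation
each:

	/* Hypothetical future additions; not included in this patch. */
	DEFINE_BATCHED_ENTROPY(u16)
	DEFINE_BATCHED_ENTROPY(u8)

Each invocation would expand into its own struct batch_u16/batch_u8,
its per-CPU batch, and an exported get_random_u16()/get_random_u8().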

This was initially tested using an always_inline function and letting
gcc constant fold the type size in, but the code generation was less
efficient, and in general the result was more verbose and harder to
follow. So this patch goes with the boring macro solution, similar to
what's already done for the _wait functions in random.h.
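
For context, here is a rough reconstruction of that rejected approach.
This sketch is illustrative only: the helper name get_random_batched
and the shared byte buffer are assumptions, not code from any version
of the patch.

	/* Illustrative sketch, NOT the merged code. */
	struct batched_entropy {
		u8 entropy[CHACHA_BLOCK_SIZE * 3 / 2];
		local_lock_t lock;
		unsigned long generation;
		unsigned int position;
	};

	static DEFINE_PER_CPU(struct batched_entropy, batched_entropy) = {
		.lock = INIT_LOCAL_LOCK(batched_entropy.lock),
		.position = UINT_MAX
	};

	/* size is a compile-time constant at every call site, so gcc can
	 * constant fold it once this is inlined. */
	static __always_inline void get_random_batched(void *ret, size_t size)
	{
		struct batched_entropy *batch;
		unsigned long flags, next_gen;

		warn_unseeded_randomness();

		if (!crng_ready()) {
			_get_random_bytes(ret, size);
			return;
		}

		local_lock_irqsave(&batched_entropy.lock, flags);
		batch = raw_cpu_ptr(&batched_entropy);

		/* Refill if the batch is stale or too few bytes remain. */
		next_gen = READ_ONCE(base_crng.generation);
		if (batch->position >= sizeof(batch->entropy) ||
		    batch->position + size > sizeof(batch->entropy) ||
		    next_gen != batch->generation) {
			_get_random_bytes(batch->entropy, sizeof(batch->entropy));
			batch->position = 0;
			batch->generation = next_gen;
		}

		memcpy(ret, batch->entropy + batch->position, size);
		memzero_explicit(batch->entropy + batch->position, size);
		batch->position += size;
		local_unlock_irqrestore(&batched_entropy.lock, flags);
	}

	u64 get_random_u64(void)
	{
		u64 ret;
		get_random_batched(&ret, sizeof(ret));
		return ret;
	}
	EXPORT_SYMBOL(get_random_u64);

The macro version's per-type arrays sidestep the memcpy and the
byte-granular buffer above, which is presumably part of why its code
generation came out tighter.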

Cc: Dominik Brodowski <linux@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Jason A. Donenfeld <Jason@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/char/random.c |  145 ++++++++++++++++++--------------------------------
 1 file changed, 54 insertions(+), 91 deletions(-)

--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -509,99 +509,62 @@ out_zero_chacha:
  * provided by this function is okay, the function wait_for_random_bytes()
  * should be called and return 0 at least once at any point prior.
  */
-struct batched_entropy {
-	union {
-		/*
-		 * We make this 1.5x a ChaCha block, so that we get the
-		 * remaining 32 bytes from fast key erasure, plus one full
-		 * block from the detached ChaCha state. We can increase
-		 * the size of this later if needed so long as we keep the
-		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
-		 */
-		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
-		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
-	};
-	local_lock_t lock;
-	unsigned long generation;
-	unsigned int position;
-};
 
+#define DEFINE_BATCHED_ENTROPY(type)						\
+struct batch_ ##type {								\
+	/*									\
+	 * We make this 1.5x a ChaCha block, so that we get the			\
+	 * remaining 32 bytes from fast key erasure, plus one full		\
+	 * block from the detached ChaCha state. We can increase		\
+	 * the size of this later if needed so long as we keep the		\
+	 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.		\
+	 */									\
+	type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))];		\
+	local_lock_t lock;							\
+	unsigned long generation;						\
+	unsigned int position;							\
+};										\
+										\
+static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = {	\
+	.lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock),			\
+	.position = UINT_MAX							\
+};										\
+										\
+type get_random_ ##type(void)							\
+{										\
+	type ret;								\
+	unsigned long flags;							\
+	struct batch_ ##type *batch;						\
+	unsigned long next_gen;							\
+										\
+	warn_unseeded_randomness();						\
+										\
+	if  (!crng_ready()) {							\
+		_get_random_bytes(&ret, sizeof(ret));				\
+		return ret;							\
+	}									\
+										\
+	local_lock_irqsave(&batched_entropy_ ##type.lock, flags);		\
+	batch = raw_cpu_ptr(&batched_entropy_##type);				\
+										\
+	next_gen = READ_ONCE(base_crng.generation);				\
+	if (batch->position >= ARRAY_SIZE(batch->entropy) ||			\
+	    next_gen != batch->generation) {					\
+		_get_random_bytes(batch->entropy, sizeof(batch->entropy));	\
+		batch->position = 0;						\
+		batch->generation = next_gen;					\
+	}									\
+										\
+	ret = batch->entropy[batch->position];					\
+	batch->entropy[batch->position] = 0;					\
+	++batch->position;							\
+	local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags);		\
+	return ret;								\
+}										\
+EXPORT_SYMBOL(get_random_ ##type);
 
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
-	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
-	.position = UINT_MAX
-};
-
-u64 get_random_u64(void)
-{
-	u64 ret;
-	unsigned long flags;
-	struct batched_entropy *batch;
-	unsigned long next_gen;
-
-	warn_unseeded_randomness();
-
-	if  (!crng_ready()) {
-		_get_random_bytes(&ret, sizeof(ret));
-		return ret;
-	}
-
-	local_lock_irqsave(&batched_entropy_u64.lock, flags);
-	batch = raw_cpu_ptr(&batched_entropy_u64);
-
-	next_gen = READ_ONCE(base_crng.generation);
-	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
-	    next_gen != batch->generation) {
-		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
-		batch->position = 0;
-		batch->generation = next_gen;
-	}
-
-	ret = batch->entropy_u64[batch->position];
-	batch->entropy_u64[batch->position] = 0;
-	++batch->position;
-	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(get_random_u64);
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
-	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
-	.position = UINT_MAX
-};
-
-u32 get_random_u32(void)
-{
-	u32 ret;
-	unsigned long flags;
-	struct batched_entropy *batch;
-	unsigned long next_gen;
-
-	warn_unseeded_randomness();
-
-	if  (!crng_ready()) {
-		_get_random_bytes(&ret, sizeof(ret));
-		return ret;
-	}
-
-	local_lock_irqsave(&batched_entropy_u32.lock, flags);
-	batch = raw_cpu_ptr(&batched_entropy_u32);
-
-	next_gen = READ_ONCE(base_crng.generation);
-	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
-	    next_gen != batch->generation) {
-		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
-		batch->position = 0;
-		batch->generation = next_gen;
-	}
-
-	ret = batch->entropy_u32[batch->position];
-	batch->entropy_u32[batch->position] = 0;
-	++batch->position;
-	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
-	return ret;
-}
-EXPORT_SYMBOL(get_random_u32);
+DEFINE_BATCHED_ENTROPY(u64)
+DEFINE_BATCHED_ENTROPY(u32)
 
 #ifdef CONFIG_SMP
 /*
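
For reference, the sizing comment in the macro works out as follows
(simple arithmetic, not part of the patch itself): with
CHACHA_BLOCK_SIZE = 64 bytes, (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE
for integer_blocks = 1 gives 1.5 * 64 = 96 bytes per batch, i.e.
ARRAY_SIZE(batch->entropy) is 96 / 8 = 12 for u64 and 96 / 4 = 24
for u32.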


Patches currently in the stable-queue which might be from Jason@xxxxxxxxx are:

queue-5.18/random-remove-ratelimiting-for-in-kernel-unseeded-randomness.patch
queue-5.18/random-fix-sysctl-documentation-nits.patch
queue-5.18/random-help-compiler-out-with-fast_mix-by-using-simpler-arguments.patch
queue-5.18/siphash-use-one-source-of-truth-for-siphash-permutations.patch
queue-5.18/um-use-fallback-for-random_get_entropy-instead-of-zero.patch
queue-5.18/random-order-timer-entropy-functions-below-interrupt-functions.patch
queue-5.18/random-unify-batched-entropy-implementations.patch
queue-5.18/random-make-consistent-use-of-buf-and-len.patch
queue-5.18/random-move-randomize_page-into-mm-where-it-belongs.patch
queue-5.18/random-use-first-128-bits-of-input-as-fast-init.patch
queue-5.18/random-use-proper-return-types-on-get_random_-int-long-_wait.patch
queue-5.18/s390-define-get_cycles-macro-for-arch-override.patch
queue-5.18/timekeeping-add-raw-clock-fallback-for-random_get_entropy.patch
queue-5.18/random-use-static-branch-for-crng_ready.patch
queue-5.18/arm-use-fallback-for-random_get_entropy-instead-of-zero.patch
queue-5.18/mips-use-fallback-for-random_get_entropy-instead-of-just-c0-random.patch
queue-5.18/random-avoid-initializing-twice-in-credit-race.patch
queue-5.18/random-move-initialization-functions-out-of-hot-pages.patch
queue-5.18/random-do-not-pretend-to-handle-premature-next-security-model.patch
queue-5.18/random-do-not-use-batches-when-crng_ready.patch
queue-5.18/m68k-use-fallback-for-random_get_entropy-instead-of-zero.patch
queue-5.18/random-move-initialization-out-of-reseeding-hot-path.patch
queue-5.18/x86-tsc-use-fallback-for-random_get_entropy-instead-of-zero.patch
queue-5.18/random-credit-architectural-init-the-exact-amount.patch
queue-5.18/random-check-for-signals-after-page-of-pool-writes.patch
queue-5.18/random-remove-extern-from-functions-in-header.patch
queue-5.18/random-do-not-use-input-pool-from-hard-irqs.patch
queue-5.18/random-wire-up-fops-splice_-read-write-_iter.patch
queue-5.18/random-insist-on-random_get_entropy-existing-in-order-to-simplify.patch
queue-5.18/powerpc-define-get_cycles-macro-for-arch-override.patch
queue-5.18/parisc-define-get_cycles-macro-for-arch-override.patch
queue-5.18/sparc-use-fallback-for-random_get_entropy-instead-of-zero.patch
queue-5.18/nios2-use-fallback-for-random_get_entropy-instead-of-zero.patch
queue-5.18/init-call-time_init-before-rand_initialize.patch
queue-5.18/riscv-use-fallback-for-random_get_entropy-instead-of-zero.patch
queue-5.18/ia64-define-get_cycles-macro-for-arch-override.patch
queue-5.18/random-handle-latent-entropy-and-command-line-from-random_init.patch
queue-5.18/random-use-proper-jiffies-comparison-macro.patch
queue-5.18/alpha-define-get_cycles-macro-for-arch-override.patch
queue-5.18/random-convert-to-using-fops-read_iter.patch
queue-5.18/xtensa-use-fallback-for-random_get_entropy-instead-of-zero.patch
queue-5.18/random-use-symbolic-constants-for-crng_init-states.patch
queue-5.18/random-convert-to-using-fops-write_iter.patch


