In an effort to drop __credit_entropy_bits_fast() in favor of the new
__queue_entropy()/__dispatch_queued_entropy_fast() API, convert
add_interrupt_randomness() from the former to the latter.

There is no change in functionality at this point, because
__credit_entropy_bits_fast() has already been reimplemented on top of
the new API in a previous patch.

Signed-off-by: Nicolai Stange <nstange@xxxxxxx>
---
 drivers/char/random.c | 28 ++++++++++++++++------------
 1 file changed, 16 insertions(+), 12 deletions(-)

diff --git a/drivers/char/random.c b/drivers/char/random.c
index e8c86abde901..bd3774c6be4b 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1512,6 +1512,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	unsigned long		seed;
 	int			credit = 0;
 	bool			reseed;
+	struct queued_entropy	q = { 0 };
 
 	if (cycles == 0)
 		cycles = get_reg(fast_pool, regs);
@@ -1546,24 +1547,27 @@ void add_interrupt_randomness(int irq, int irq_flags)
 	if (!spin_trylock(&r->lock))
 		return;
 
-	fast_pool->last = now;
-	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
-
 	/*
 	 * If we have architectural seed generator, produce a seed and
-	 * add it to the pool. For the sake of paranoia don't let the
-	 * architectural seed generator dominate the input from the
-	 * interrupt noise.
+	 * add it to the pool further below. For the sake of paranoia
+	 * don't let the architectural seed generator dominate the
+	 * input from the interrupt noise.
 	 */
-	if (arch_get_random_seed_long(&seed)) {
-		__mix_pool_bytes(r, &seed, sizeof(seed));
-		credit = 1;
-	}
+	credit = !!arch_get_random_seed_long(&seed);
 
+	fast_pool->last = now;
 	fast_pool->count = 0;
 
-	/* award one bit for the contents of the fast pool */
-	reseed = __credit_entropy_bits_fast(r, credit + 1);
+	__queue_entropy(r, &q, (credit + 1) << ENTROPY_SHIFT);
+	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
+	if (credit) {
+		/*
+		 * A seed has been obtained from
+		 * arch_get_random_seed_long() above, mix it in.
+		 */
+		__mix_pool_bytes(r, &seed, sizeof(seed));
+	}
+	reseed = __dispatch_queued_entropy_fast(r, &q);
 	spin_unlock(&r->lock);
 	if (reseed)
 		crng_reseed(&primary_crng, r);
-- 
2.26.2
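
For reference, a minimal sketch of the calling pattern the patch converts
to: entropy is queued (in fractional bits, hence the ENTROPY_SHIFT) before
the corresponding bytes are mixed into the pool, and dispatched afterwards.
The helper names and signatures below are taken from the hunks above; the
wrapper function itself is illustrative only and assumes, like
add_interrupt_randomness(), that the caller already holds r->lock.

	/*
	 * Illustrative sketch only, not part of the patch.
	 *
	 * Old pattern:
	 *	reseed = __credit_entropy_bits_fast(r, nbits);
	 */
	static bool queue_and_dispatch_sketch(struct entropy_store *r,
					      const void *buf, int nbytes,
					      int nbits)
	{
		struct queued_entropy q = { 0 };
		bool reseed;

		/* Queue the credit, in fractional bits, before mixing. */
		__queue_entropy(r, &q, nbits << ENTROPY_SHIFT);
		/* Mix in the bytes the queued credit is attributed to. */
		__mix_pool_bytes(r, buf, nbytes);
		/* Dispatch; a true return value asks the caller to reseed. */
		reseed = __dispatch_queued_entropy_fast(r, &q);

		return reseed;
	}

Keeping the queue/dispatch pair around the __mix_pool_bytes() call, as done
in the hunk above, ties the credited entropy to data that has actually been
mixed into the pool while r->lock is held.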