From: "Jason A. Donenfeld" <Jason@xxxxxxxxx> commit 791332b3cbb080510954a4c152ce02af8832eac9 upstream. Now that fast_mix() has more than one caller, gcc no longer inlines it. That's fine. But it also doesn't handle the compound literal argument we pass it very efficiently, nor does it handle the loop as well as it could. So just expand the code to spell out this function so that it generates the same code as it did before. Performance-wise, this now behaves as it did before the last commit. The difference in actual code size on x86 is 45 bytes, which is less than a cache line. Signed-off-by: Jason A. Donenfeld <Jason@xxxxxxxxx> Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx> --- drivers/char/random.c | 44 +++++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 21 deletions(-) --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1029,25 +1029,30 @@ static DEFINE_PER_CPU(struct fast_pool, * and therefore this has no security on its own. s represents the * four-word SipHash state, while v represents a two-word input. */ -static void fast_mix(unsigned long s[4], const unsigned long v[2]) +static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2) { - size_t i; - - for (i = 0; i < 2; ++i) { - s[3] ^= v[i]; #ifdef CONFIG_64BIT - s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32); - s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2]; - s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0]; - s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32); +#define PERM() do { \ + s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32); \ + s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2]; \ + s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0]; \ + s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32); \ +} while (0) #else - s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16); - s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2]; - s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0]; - s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16); +#define PERM() do { \ + s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16); \ + s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2]; \ + s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0]; \ + s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16); \ +} while (0) #endif - s[0] ^= v[i]; - } + + s[3] ^= v1; + PERM(); + s[0] ^= v1; + s[3] ^= v2; + PERM(); + s[0] ^= v2; } #ifdef CONFIG_SMP @@ -1117,10 +1122,8 @@ void add_interrupt_randomness(int irq) struct pt_regs *regs = get_irq_regs(); unsigned int new_count; - fast_mix(fast_pool->pool, (unsigned long[2]){ - entropy, - (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq) - }); + fast_mix(fast_pool->pool, entropy, + (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq)); new_count = ++fast_pool->count; if (new_count & MIX_INFLIGHT) @@ -1160,8 +1163,7 @@ static void add_timer_randomness(struct * sometime after, so mix into the fast pool. */ if (in_hardirq()) { - fast_mix(this_cpu_ptr(&irq_randomness)->pool, - (unsigned long[2]){ entropy, num }); + fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num); } else { spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(&entropy, sizeof(entropy));