Smaller, faster, and a smidge more uniform than %.

(get_random_int() % hole) is biased toward smaller values whenever
hole does not evenly divide 2^32; get_random_max32(hole) returns a
bounded value without that modulo bias.

Signed-off-by: George Spelvin <lkml@xxxxxxx>
Cc: Daniel Borkmann <daniel@xxxxxxxxxxxxx>
Cc: Alexei Starovoitov <ast@xxxxxxxxxx>
Cc: Martin KaFai Lau <kafai@xxxxxx>
Cc: Song Liu <songliubraving@xxxxxx>
Cc: Yonghong Song <yhs@xxxxxx>
Cc: Andrii Nakryiko <andriin@xxxxxx>
Cc: bpf@xxxxxxxxxxxxxxx
Cc: netdev@xxxxxxxxxxxxxxx
---
 kernel/bpf/core.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index af6b738cf435c..61713a7f73d85 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -874,7 +874,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	hdr->pages = pages;
 	hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
 		     PAGE_SIZE - sizeof(*hdr));
-	start = (get_random_int() % hole) & ~(alignment - 1);
+	start = get_random_max32(hole) & ~(alignment - 1);

 	/* Leave a random number of instructions before BPF code. */
 	*image_ptr = &hdr->image[start];
@@ -947,7 +947,7 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
 			      bool emit_zext)
 {
 	struct bpf_insn *to = to_buff;
-	u32 imm_rnd = get_random_int();
+	u32 imm_rnd = get_random_u32();
 	s16 off;

 	BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG);
--
2.26.0
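
For reference, a minimal user-space sketch of the multiply-with-rejection
technique (Lemire's method) that a helper like get_random_max32() can
use.  The names rand32() and random_below() are illustrative
assumptions, not the kernel's actual implementation:

#include <stdint.h>

/* Illustrative stand-in for a uniform 32-bit random source. */
extern uint32_t rand32(void);

/* Return a uniformly distributed value in [0, range); range != 0. */
static uint32_t random_below(uint32_t range)
{
	/* The high 32 bits of the 64-bit product land in [0, range). */
	uint64_t prod = (uint64_t)rand32() * range;
	uint32_t low = (uint32_t)prod;

	if (low < range) {
		/* threshold = 2^32 % range, computed in 32 bits. */
		uint32_t threshold = -range % range;

		/*
		 * Reject the few random words that would make some
		 * outputs occur one extra time, and draw again.
		 */
		while (low < threshold) {
			prod = (uint64_t)rand32() * range;
			low = (uint32_t)prod;
		}
	}
	return prod >> 32;
}

Unlike "get_random_int() % hole", the multiply rarely needs a division
(only on the fast-path miss), and the rejection loop removes the bias
that % has whenever the range does not evenly divide 2^32.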