Do not offset the mmap base address because of stack randomization if
the current task does not want randomization. Note that x86 already
implements this behaviour.

Signed-off-by: Alexandre Ghiti <alex@xxxxxxxx>
Acked-by: Kees Cook <keescook@xxxxxxxxxxxx>
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
---
 arch/arm64/mm/mmap.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index ed4f9915f2b8..ac89686c4af8 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -65,7 +65,11 @@ unsigned long arch_mmap_rnd(void)
 static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
 {
 	unsigned long gap = rlim_stack->rlim_cur;
-	unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
+	unsigned long pad = stack_guard_gap;
+
+	/* Account for stack randomization if necessary */
+	if (current->flags & PF_RANDOMIZE)
+		pad += (STACK_RND_MASK << PAGE_SHIFT);
 
 	/* Values close to RLIM_INFINITY can overflow. */
 	if (gap + pad > gap)
-- 
2.20.1
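
(Not part of the patch, review context only.) The PF_RANDOMIZE check
matters because the flag is left clear at exec time for tasks carrying
the ADDR_NO_RANDOMIZE personality, e.g. via "setarch -R" or gdb's
disable-randomization. Below is a minimal userspace sketch of how a
task opts out, assuming a Linux host with <sys/personality.h>; it is
illustrative only, nothing in it comes from the patch:

/*
 * Illustration (not part of the patch): set ADDR_NO_RANDOMIZE and
 * re-exec ourselves, so the new image runs without PF_RANDOMIZE and,
 * with this change, mmap_base() no longer pads the gap by
 * STACK_RND_MASK << PAGE_SHIFT.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/personality.h>

int main(int argc, char *argv[])
{
	if (argc < 2 || strcmp(argv[1], "--no-aslr") != 0) {
		/* Query current personality, add ADDR_NO_RANDOMIZE. */
		if (personality(personality(0xffffffff) | ADDR_NO_RANDOMIZE) == -1) {
			perror("personality");
			return EXIT_FAILURE;
		}
		/* Re-exec so the kernel sets up the new image without PF_RANDOMIZE. */
		char *args[] = { argv[0], "--no-aslr", NULL };
		execv("/proc/self/exe", args);
		perror("execv");
		return EXIT_FAILURE;
	}

	/* In this branch, stack/mmap addresses are stable across runs. */
	int probe;
	printf("stack probe at %p\n", (void *)&probe);
	return 0;
}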