From: Ard Biesheuvel <ardb@xxxxxxxxxx>

commit 59b37fe52f49955791a460752c37145f1afdcad1 upstream.

Instead of reloading the shadow call stack pointer from the ordinary
stack, which may be vulnerable to the kind of gadget based attacks
shadow call stacks were designed to prevent, let's store a task's
shadow call stack pointer in the task struct when switching to the
shadow IRQ stack.

Given that currently, the task_struct::scs_sp field is only used to
preserve the shadow call stack pointer while a task is scheduled out
or running in user space, reusing this field to preserve and restore
it while running off the IRQ stack must be safe, as those occurrences
are guaranteed to never overlap. (The stack switching logic only
switches stacks when running from the task stack, and so the value
being saved here always corresponds to the task mode shadow stack.)

While at it, fold a mov/add/mov sequence into a single add.

Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Reviewed-by: Kees Cook <keescook@xxxxxxxxxxxx>
Acked-by: Mark Rutland <mark.rutland@xxxxxxx>
Link: https://lore.kernel.org/r/20230109174800.3286265-3-ardb@xxxxxxxxxx
Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
[ardb: v5.10 backport, which doesn't have call_on_irq_stack() yet]
Signed-off-by: Ard Biesheuvel <ardb@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/arm64/kernel/entry.S | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -431,9 +431,7 @@ SYM_CODE_END(__swpan_exit_el0)
 
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
-#ifdef CONFIG_SHADOW_CALL_STACK
-	mov	x24, scs_sp		// preserve the original shadow stack
-#endif
+	scs_save tsk			// preserve the original shadow stack
 
 	/*
 	 * Compare sp with the base of the task stack.
@@ -467,9 +465,7 @@ SYM_CODE_END(__swpan_exit_el0)
 	 */
 	.macro	irq_stack_exit
 	mov	sp, x19
-#ifdef CONFIG_SHADOW_CALL_STACK
-	mov	scs_sp, x24
-#endif
+	scs_load_current
 	.endm
 
 /* GPRs used by entry code */
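
For reference (not part of the patch), the scs_save and scs_load_current
macros used above are defined for assembly code in
arch/arm64/include/asm/scs.h. A rough sketch of their expansion is shown
below, based on the upstream definitions; the exact helper names
(get_current_task) and the TSK_TI_SCS_SP asm-offset are quoted from
memory here, not from this diff:

	/* scs_sp is an alias for x18, the shadow call stack pointer */
	scs_sp	.req	x18

	/* spill the current shadow call stack pointer into \tsk */
	.macro scs_save tsk
	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
	.endm

	/* reload the shadow call stack pointer of the current task */
	.macro scs_load_current
	get_current_task scs_sp			// current task into scs_sp
	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
	.endm

With irq_stack_entry saving via scs_save tsk and irq_stack_exit
reloading via scs_load_current, the shadow stack pointer never transits
the ordinary stack, which is the hardening point of the change.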