This reverts commit 3f225f29c69c13ce1cbdb1d607a42efeef080056.

Since that commit, irq_stack_entry saves the shadow call stack pointer
for the interrupted context in the current task's thread_info. However,
soft IRQs may be pending at the end of a hard IRQ, and they are
processed with interrupts enabled. If another hard IRQ arrives while a
softirq is running, irq_stack_entry is entered again and overwrites the
shadow call stack pointer already stashed in the current task's
thread_info. As a result, the wrong shadow call stack pointer is
restored on exit from the first (outer) hard IRQ entry, and the system
ends up panicking.

            task A                   |      task A
-------------------------------------+------------------------------------
el1_irq          //irq1 enter        |
  irq_handler    //save scs_sp1      |
    gic_handle_irq                   |
    irq_exit                         |
      __do_softirq                   |
                                     | el1_irq          //irq2 enter
                                     |   irq_handler    //save scs_sp2
                                     |                  //overwrite scs_sp1
                                     |   ...
                                     |   irq_stack_exit //restore scs_sp2
irq_stack_exit   //restore wrong     |
                 //scs_sp2           |

Fixes: 3f225f29c69c ("arm64: Stash shadow stack pointer in the task struct on interrupt")
Cc: Ard Biesheuvel <ardb@xxxxxxxxxx>
Signed-off-by: Xiang Yang <xiangyang3@xxxxxxxxxx>
---
 arch/arm64/kernel/entry.S | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a94acea770c7..020a455824be 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -431,7 +431,9 @@ SYM_CODE_END(__swpan_exit_el0)
 
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
-	scs_save tsk			// preserve the original shadow stack
+#ifdef CONFIG_SHADOW_CALL_STACK
+	mov	x24, scs_sp		// preserve the original shadow stack
+#endif
 
 	/*
 	 * Compare sp with the base of the task stack.
@@ -465,7 +467,9 @@ SYM_CODE_END(__swpan_exit_el0)
 	 */
 	.macro	irq_stack_exit
 	mov	sp, x19
-	scs_load_current
+#ifdef CONFIG_SHADOW_CALL_STACK
+	mov	scs_sp, x24
+#endif
	.endm
 
 /* GPRs used by entry code */
-- 
2.34.1
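
For readers less familiar with the entry code, here is a minimal
user-space C sketch of the failure mode, assuming the shadow call stack
pointer and the per-task stash slot can be modelled as plain variables.
The names (irq_entry_shared_slot, irq_entry_private_slot, task_stash)
and the addresses are invented for illustration only and do not exist
in the kernel; the "private slot" variant merely models stashing into a
callee-saved register (x24) per entry, as the revert restores.

#include <stdio.h>

static unsigned long scs_sp;     /* models the scs_sp register */
static unsigned long task_stash; /* models the single per-task stash slot */

/* Broken scheme: every entry writes the same per-task slot, so a
 * nested entry clobbers the value saved by the outer entry. */
static void irq_entry_shared_slot(unsigned long irq_scs, int nest)
{
	task_stash = scs_sp;          /* save the interrupted context */
	scs_sp = irq_scs;             /* switch to the IRQ shadow stack */

	if (nest)                     /* softirq window: a second IRQ nests */
		irq_entry_shared_slot(irq_scs + 0x100, 0);

	scs_sp = task_stash;          /* outer exit restores the inner value */
}

/* Working scheme: each entry keeps its own copy, so nesting is safe. */
static void irq_entry_private_slot(unsigned long irq_scs, int nest)
{
	unsigned long saved = scs_sp; /* per-entry copy (think: x24) */

	scs_sp = irq_scs;

	if (nest)
		irq_entry_private_slot(irq_scs + 0x100, 0);

	scs_sp = saved;               /* always restores this entry's value */
}

int main(void)
{
	scs_sp = 0x1000;
	irq_entry_shared_slot(0x2000, 1);
	printf("shared slot:  scs_sp=%#lx (expected 0x1000)\n", scs_sp);

	scs_sp = 0x1000;
	irq_entry_private_slot(0x2000, 1);
	printf("private slot: scs_sp=%#lx (expected 0x1000)\n", scs_sp);
	return 0;
}

Running the sketch prints 0x2000 for the shared-slot variant, i.e. the
outer entry restores the nested entry's value, which is the corruption
shown in the diagram above; the per-entry variant restores 0x1000.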