From: Mark Rutland <mark.rutland@xxxxxxx>

commit 064dbfb4169141943ec7d9dbfd02974dd008f2ce upstream.

For various reasons we'd like to convert the bulk of arm64's exception
triage logic to C. As a step towards that, this patch converts the EL1
and EL0 IRQ+FIQ triage logic to C.

Separate C functions are added for the native and compat cases so that
in subsequent patches we can handle native/compat differences in C.

Since the triage functions can now call arm64_apply_bp_hardening()
directly, the do_el0_irq_bp_hardening() wrapper function is removed.

Since the user_exit_irqoff macro is now unused, it is removed. The
user_enter_irqoff macro is still used by the ret_to_user code, and
cannot be removed at this time.

Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
Acked-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Acked-by: Marc Zyngier <maz@xxxxxxxxxx>
Reviewed-by: Joey Gouly <joey.gouly@xxxxxxx>
Cc: James Morse <james.morse@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Link: https://lore.kernel.org/r/20210607094624.34689-8-mark.rutland@xxxxxxx
Signed-off-by: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Xiang Yang <xiangyang3@xxxxxxxxxx>
---
 arch/arm64/include/asm/exception.h |  3 ++
 arch/arm64/kernel/entry-common.c   | 68 +++++++++++++++++++++++++++++-
 arch/arm64/kernel/entry.S          | 54 +++---------------------
 3 files changed, 77 insertions(+), 48 deletions(-)

diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index def53267f4a2..846ca8132f76 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -26,6 +26,9 @@ static inline u32 disr_to_esr(u64 disr)
 	return esr;
 }
 
+asmlinkage void el1_irq_handler(struct pt_regs *regs);
+asmlinkage void el0_irq_handler(struct pt_regs *regs);
+asmlinkage void el0_irq_compat_handler(struct pt_regs *regs);
 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
 asmlinkage void call_on_irq_stack(struct pt_regs *regs,
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index a533f4827253..340249c65007 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -19,6 +19,8 @@
 #include <asm/exception.h>
 #include <asm/kprobes.h>
 #include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/stacktrace.h>
 #include <asm/sysreg.h>
 
 /*
@@ -113,7 +115,7 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 	exit_to_kernel_mode(regs);
 }
 
-asmlinkage void __sched arm64_preempt_schedule_irq(void)
+static void __sched arm64_preempt_schedule_irq(void)
 {
 	lockdep_assert_irqs_disabled();
 
@@ -141,6 +143,42 @@ static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
 	exit_to_kernel_mode(regs);
 }
 
+static void do_interrupt_handler(struct pt_regs *regs,
+				 void (*handler)(struct pt_regs *))
+{
+	if (on_thread_stack())
+		call_on_irq_stack(regs, handler);
+	else
+		handler(regs);
+}
+
+extern void (*handle_arch_irq)(struct pt_regs *);
+
+static void noinstr el1_interrupt(struct pt_regs *regs,
+				  void (*handler)(struct pt_regs *))
+{
+	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+	enter_el1_irq_or_nmi(regs);
+	do_interrupt_handler(regs, handler);
+
+	/*
+	 * Note: thread_info::preempt_count includes both thread_info::count
+	 * and thread_info::need_resched, and is not equivalent to
+	 * preempt_count().
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPTION) &&
+	    READ_ONCE(current_thread_info()->preempt_count) == 0)
+		arm64_preempt_schedule_irq();
+
+	exit_el1_irq_or_nmi(regs);
+}
+
+asmlinkage void noinstr el1_irq_handler(struct pt_regs *regs)
+{
+	el1_interrupt(regs, handle_arch_irq);
+}
+
 static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
 {
 	unsigned long far = read_sysreg(far_el1);
@@ -445,6 +483,29 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
 	}
 }
 
+static void noinstr el0_interrupt(struct pt_regs *regs,
+				  void (*handler)(struct pt_regs *))
+{
+	enter_from_user_mode();
+
+	write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+	if (regs->pc & BIT(55))
+		arm64_apply_bp_hardening();
+
+	do_interrupt_handler(regs, handler);
+}
+
+static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
+{
+	el0_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs)
+{
+	__el0_irq_handler_common(regs);
+}
+
 #ifdef CONFIG_COMPAT
 static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
 {
@@ -453,6 +514,11 @@ static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
 	do_el0_cp15(esr, regs);
 }
 
+asmlinkage void noinstr el0_irq_compat_handler(struct pt_regs *regs)
+{
+	__el0_irq_handler_common(regs);
+}
+
 static void noinstr el0_svc_compat(struct pt_regs *regs)
 {
 	enter_from_user_mode();
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 8b17d83e8c87..b0d102669dde 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -429,49 +429,6 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0)
 SYM_CODE_END(__swpan_exit_el0)
 #endif
 
-	.macro	irq_stack_entry
-	mov	x19, sp			// preserve the original sp
-#ifdef CONFIG_SHADOW_CALL_STACK
-	mov	x24, scs_sp		// preserve the original shadow stack
-#endif
-
-	/*
-	 * Compare sp with the base of the task stack.
-	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
-	 * and should switch to the irq stack.
-	 */
-	ldr	x25, [tsk, TSK_STACK]
-	eor	x25, x25, x19
-	and	x25, x25, #~(THREAD_SIZE - 1)
-	cbnz	x25, 9998f
-
-	ldr_this_cpu x25, irq_stack_ptr, x26
-	mov	x26, #IRQ_STACK_SIZE
-	add	x26, x25, x26
-
-	/* switch to the irq stack */
-	mov	sp, x26
-
-#ifdef CONFIG_SHADOW_CALL_STACK
-	/* also switch to the irq shadow stack */
-	adr_this_cpu scs_sp, irq_shadow_call_stack, x26
-#endif
-
-9998:
-	.endm
-
-	/*
-	 * The callee-saved regs (x19-x29) should be preserved between
-	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
-	 * uses x20-x23 to store data for later use.
-	 */
-	.macro	irq_stack_exit
-	mov	sp, x19
-#ifdef CONFIG_SHADOW_CALL_STACK
-	mov	scs_sp, x24
-#endif
-	.endm
-
 /* GPRs used by entry code */
 tsk	.req	x28		// current thread_info
 
@@ -675,7 +632,8 @@ SYM_CODE_END(el1_sync)
 	.align	6
SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
 	kernel_entry 1
-	el1_interrupt_handler handle_arch_irq
+	mov	x0, sp
+	bl	el1_irq_handler
 	kernel_exit 1
 SYM_CODE_END(el1_irq)
 
@@ -702,7 +660,9 @@ SYM_CODE_END(el0_sync_compat)
 	.align	6
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
 	kernel_entry 0, 32
-	b	el0_irq_naked
+	mov	x0, sp
+	bl	el0_irq_compat_handler
+	b	ret_to_user
 SYM_CODE_END(el0_irq_compat)
 
 SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
@@ -714,8 +674,8 @@ SYM_CODE_END(el0_error_compat)
 	.align	6
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
 	kernel_entry 0
-el0_irq_naked:
-	el0_interrupt_handler handle_arch_irq
+	mov	x0, sp
+	bl	el0_irq_handler
 	b	ret_to_user
 SYM_CODE_END(el0_irq)
-- 
2.34.1
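
For readers new to this code, here is a minimal, stand-alone C model of
the dispatch shape the patch introduces. It is not kernel code: the stub
implementations of on_thread_stack(), call_on_irq_stack() and
root_irq_handler() are invented purely for illustration, and only the
shape of do_interrupt_handler() mirrors the patch. It builds as an
ordinary user-space program:

#include <stdbool.h>
#include <stdio.h>

struct pt_regs {
	unsigned long pc;
};

/* Invented stand-ins for the real kernel primitives. */
static bool on_thread_stack(void)
{
	return true;	/* pretend we were interrupted on the task stack */
}

static void call_on_irq_stack(struct pt_regs *regs,
			      void (*handler)(struct pt_regs *))
{
	/* The real helper switches sp (and scs_sp) to per-cpu IRQ stacks. */
	handler(regs);
}

static void root_irq_handler(struct pt_regs *regs)
{
	printf("IRQ taken at pc=0x%lx\n", regs->pc);
}

static void (*handle_arch_irq)(struct pt_regs *) = root_irq_handler;

/*
 * Mirrors the patch: run the handler on the IRQ stack only when we were
 * interrupted on the thread stack; a nested interrupt is already on the
 * IRQ stack and can invoke the handler directly.
 */
static void do_interrupt_handler(struct pt_regs *regs,
				 void (*handler)(struct pt_regs *))
{
	if (on_thread_stack())
		call_on_irq_stack(regs, handler);
	else
		handler(regs);
}

int main(void)
{
	struct pt_regs regs = { .pc = 0x80001234UL };

	/* Roughly what el0/el1_irq_handler() boil down to after triage. */
	do_interrupt_handler(&regs, handle_arch_irq);
	return 0;
}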