Scheduling a 32-bit application on a 64-bit-only CPU is a bad idea.

Ensure that 32-bit applications always take the slow-path when returning
to userspace on a system with mismatched support at EL0, so that we can
avoid trying to run on a 64-bit-only CPU and force a SIGKILL instead.

Signed-off-by: Will Deacon <will@xxxxxxxxxx>
---
 arch/arm64/kernel/process.c | 21 ++++++++++++++++++++-
 arch/arm64/kernel/signal.c  | 26 ++++++++++++++++++++++++++
 2 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 4784011cecac..c45b5f9dd66b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -542,6 +542,17 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	write_sysreg(val, cntkctl_el1);
 }
 
+static void compat_thread_switch(struct task_struct *next)
+{
+	if (!is_compat_thread(task_thread_info(next)))
+		return;
+
+	if (!system_has_mismatched_32bit_el0())
+		return;
+
+	set_tsk_thread_flag(next, TIF_NOTIFY_RESUME);
+}
+
 /*
  * Thread switching.
  */
@@ -558,6 +569,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	uao_thread_switch(next);
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(prev, next);
+	compat_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -620,8 +632,15 @@ unsigned long arch_align_stack(unsigned long sp)
  */
 void arch_setup_new_exec(void)
 {
-	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
+	unsigned long mmflags = 0;
+
+	if (is_compat_task()) {
+		mmflags = MMCF_AARCH32;
+		if (system_has_mismatched_32bit_el0())
+			set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
+	}
 
+	current->mm->context.flags = mmflags;
 	ptrauth_thread_init_user(current);
 
 	if (task_spec_ssb_noexec(current)) {
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index a8184cad8890..bcb6ca2d9a7c 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -911,6 +911,19 @@ static void do_signal(struct pt_regs *regs)
 	restore_saved_sigmask();
 }
 
+static bool cpu_affinity_invalid(struct pt_regs *regs)
+{
+	if (!compat_user_mode(regs))
+		return false;
+
+	/*
+	 * We're preemptible, but a reschedule will cause us to check the
+	 * affinity again.
+	 */
+	return !cpumask_test_cpu(raw_smp_processor_id(),
+				 system_32bit_el0_cpumask());
+}
+
 asmlinkage void do_notify_resume(struct pt_regs *regs,
 				 unsigned long thread_flags)
 {
@@ -948,6 +961,19 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 		if (thread_flags & _TIF_NOTIFY_RESUME) {
 			tracehook_notify_resume(regs);
 			rseq_handle_notify_resume(NULL, regs);
+
+			/*
+			 * If we reschedule after checking the affinity
+			 * then we must ensure that TIF_NOTIFY_RESUME
+			 * is set so that we check the affinity again.
+			 * Since tracehook_notify_resume() clears the
+			 * flag, ensure that the compiler doesn't move
+			 * it after the affinity check.
+			 */
+			barrier();
+
+			if (cpu_affinity_invalid(regs))
+				force_sig(SIGKILL);
 		}
 
 		if (thread_flags & _TIF_FOREIGN_FPSTATE)
-- 
2.29.0.rc2.309.g374f81d7ae-goog
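
For reviewers who want to observe the new behaviour from userspace, here is a
minimal test sketch. It assumes an asymmetric system where CPU 4 lacks AArch32
support and a 32-bit test binary at ./hello32 -- both are illustrative
placeholders, not part of the patch. With the slow-path check above in place,
pinning the child to a 64-bit-only CPU before execve() of a 32-bit binary
should leave the child killed by SIGKILL on its first return to userspace,
rather than running on an unsupported CPU.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t set;
	int status;
	pid_t pid;

	CPU_ZERO(&set);
	CPU_SET(4, &set);	/* assumption: CPU 4 is 64-bit-only */

	pid = fork();
	if (pid == 0) {
		/* Pin the child, then exec a 32-bit binary. */
		if (sched_setaffinity(0, sizeof(set), &set))
			exit(1);
		execl("./hello32", "hello32", (char *)NULL);
		exit(1);
	}

	waitpid(pid, &status, 0);
	if (WIFSIGNALED(status))
		printf("child killed by signal %d\n",
		       WTERMSIG(status));	/* expect 9 (SIGKILL) */
	return 0;
}

Note that the kill is reliable even if the child migrates between the
affinity check and the return to userspace: __switch_to() re-arms
TIF_NOTIFY_RESUME for compat tasks, and the barrier() keeps the check
ordered after tracehook_notify_resume() clears the flag.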