On Wed, Mar 25, 2020 at 5:13 PM <glider@xxxxxxxxxx> wrote:
>
> Add assembly helpers to entry_64.S that invoke hooks from kmsan_entry.c and
> notify KMSAN about interrupts.
> Also call these hooks from kernel/softirq.c
> This is needed to switch between several KMSAN contexts holding function
> parameter metadata.
>
> Signed-off-by: Alexander Potapenko <glider@xxxxxxxxxx>
> To: Alexander Potapenko <glider@xxxxxxxxxx>
> Cc: Jens Axboe <axboe@xxxxxxxxx>
> Cc: Andy Lutomirski <luto@xxxxxxxxxx>
> Cc: Vegard Nossum <vegard.nossum@xxxxxxxxxx>
> Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
> Cc: Marco Elver <elver@xxxxxxxxxx>
> Cc: Andrey Konovalov <andreyknvl@xxxxxxxxxx>
> Cc: Christoph Hellwig <hch@xxxxxx>
> Cc: linux-mm@xxxxxxxxx

Acked-by: Andrey Konovalov <andreyknvl@xxxxxxxxxx>

> ---
>
> v4:
>  - moved softirq changes to this patch
>
> Change-Id: I3037d51672fe69d09e588b27adb2d9fdc6ad3a7d
> ---
>  arch/x86/entry/entry_64.S | 16 ++++++++++++++++
>  kernel/softirq.c          |  5 +++++
>  2 files changed, 21 insertions(+)
>
> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> index 0e9504fabe526..03f5a32b0af4d 100644
> --- a/arch/x86/entry/entry_64.S
> +++ b/arch/x86/entry/entry_64.S
> @@ -35,6 +35,7 @@
>  #include <asm/asm.h>
>  #include <asm/smap.h>
>  #include <asm/pgtable_types.h>
> +#include <asm/kmsan.h>
>  #include <asm/export.h>
>  #include <asm/frame.h>
>  #include <asm/nospec-branch.h>
> @@ -575,6 +576,7 @@ SYM_CODE_START(interrupt_entry)
>
>  1:
>         ENTER_IRQ_STACK old_rsp=%rdi save_ret=1
> +       KMSAN_INTERRUPT_ENTER
>         /* We entered an interrupt context - irqs are off: */
>         TRACE_IRQS_OFF
>
> @@ -604,12 +606,14 @@ SYM_CODE_START_LOCAL(common_interrupt)
>         addq    $-0x80, (%rsp)          /* Adjust vector to [-256, -1] range */
>         call    interrupt_entry
>         UNWIND_HINT_REGS indirect=1
> +       KMSAN_UNPOISON_PT_REGS
>         call    do_IRQ  /* rdi points to pt_regs */
>         /* 0(%rsp): old RSP */
> ret_from_intr:
>         DISABLE_INTERRUPTS(CLBR_ANY)
>         TRACE_IRQS_OFF
>
> +       KMSAN_INTERRUPT_EXIT
>         LEAVE_IRQ_STACK
>
>         testb   $3, CS(%rsp)
> @@ -801,6 +805,7 @@ SYM_CODE_START(\sym)
>  .Lcommon_\sym:
>         call    interrupt_entry
>         UNWIND_HINT_REGS indirect=1
> +       KMSAN_UNPOISON_PT_REGS
>         call    \do_sym /* rdi points to pt_regs */
>         jmp     ret_from_intr
>  SYM_CODE_END(\sym)
> @@ -908,15 +913,18 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
>
>         .if \shift_ist != -1
>         subq    $\ist_offset, CPU_TSS_IST(\shift_ist)
> +       KMSAN_IST_ENTER(\shift_ist)
>         .endif
>
>         .if \read_cr2
>         movq    %r12, %rdx              /* Move CR2 into 3rd argument */
>         .endif
>
> +       KMSAN_UNPOISON_PT_REGS
>         call    \do_sym
>
>         .if \shift_ist != -1
> +       KMSAN_IST_EXIT(\shift_ist)
>         addq    $\ist_offset, CPU_TSS_IST(\shift_ist)
>         .endif
>
> @@ -1079,7 +1087,9 @@ SYM_FUNC_START(do_softirq_own_stack)
>         pushq   %rbp
>         mov     %rsp, %rbp
>         ENTER_IRQ_STACK regs=0 old_rsp=%r11
> +       KMSAN_SOFTIRQ_ENTER
>         call    __do_softirq
> +       KMSAN_SOFTIRQ_EXIT
>         LEAVE_IRQ_STACK regs=0
>         leaveq
>         ret
> @@ -1466,9 +1476,12 @@ SYM_CODE_START(nmi)
>          * done with the NMI stack.
>          */
>
> +       KMSAN_NMI_ENTER
>         movq    %rsp, %rdi
>         movq    $-1, %rsi
> +       KMSAN_UNPOISON_PT_REGS
>         call    do_nmi
> +       KMSAN_NMI_EXIT
>
>         /*
>          * Return back to user mode. We must *not* do the normal exit
> @@ -1678,10 +1691,13 @@ end_repeat_nmi:
>         call    paranoid_entry
>         UNWIND_HINT_REGS
>
> +       KMSAN_NMI_ENTER
>         /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
>         movq    %rsp, %rdi
>         movq    $-1, %rsi
> +       KMSAN_UNPOISON_PT_REGS
>         call    do_nmi
> +       KMSAN_NMI_EXIT
>
>         /* Always restore stashed CR3 value (see paranoid_entry) */
>         RESTORE_CR3     scratch_reg=%r15 save_reg=%r14
> diff --git a/kernel/softirq.c b/kernel/softirq.c
> index 0427a86743a46..98c5f4062cbfe 100644
> --- a/kernel/softirq.c
> +++ b/kernel/softirq.c
> @@ -11,6 +11,7 @@
>
>  #include <linux/export.h>
>  #include <linux/kernel_stat.h>
> +#include <linux/kmsan.h>
>  #include <linux/interrupt.h>
>  #include <linux/init.h>
>  #include <linux/mm.h>
> @@ -370,7 +371,9 @@ static inline void invoke_softirq(void)
>                  * it is the irq stack, because it should be near empty
>                  * at this stage.
>                  */
> +               kmsan_context_enter();
>                 __do_softirq();
> +               kmsan_context_exit();
>  #else
>                 /*
>                  * Otherwise, irq_exit() is called on the task stack that can
> @@ -600,7 +603,9 @@ static void run_ksoftirqd(unsigned int cpu)
>                  * We can safely run softirq on inline stack, as we are not deep
>                  * in the task stack here.
>                  */
> +               kmsan_context_enter();
>                 __do_softirq();
> +               kmsan_context_exit();
>                 local_irq_enable();
>                 cond_resched();
>                 return;
> --
> 2.25.1.696.g5e7596f4ac-goog
>
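For readers following along, here is a rough sketch of the idea behind kmsan_context_enter()/kmsan_context_exit(): KMSAN keeps shadow for function parameters and return values in a per-context buffer, and an interrupt-like context must not clobber the buffer of whatever it preempted, so the hooks switch to a spare per-CPU buffer for the duration of the handler. All names, sizes and per-CPU variables below are made up for illustration; this is not the actual kmsan_entry.c implementation from the series.

#include <linux/percpu.h>
#include <linux/string.h>

#define KMSAN_PARAM_SIZE	800	/* illustrative size of the parameter shadow */
#define KMSAN_MAX_NESTING	4	/* task -> softirq -> hardirq -> NMI */

struct kmsan_ctx_state {
	char param_shadow[KMSAN_PARAM_SIZE];	/* shadow of call arguments */
	char retval_shadow[sizeof(long)];	/* shadow of the return value */
};

/* Spare contexts for nested interrupt-like entries, plus a nesting counter. */
static DEFINE_PER_CPU(struct kmsan_ctx_state, kmsan_irq_ctx[KMSAN_MAX_NESTING]);
static DEFINE_PER_CPU(int, kmsan_irq_depth);

/* Instrumented code would read/write parameter shadow through this pointer. */
DEFINE_PER_CPU(struct kmsan_ctx_state *, kmsan_current_ctx);

void kmsan_context_enter(void)
{
	int depth = this_cpu_inc_return(kmsan_irq_depth) - 1;
	struct kmsan_ctx_state *ctx;

	if (depth >= KMSAN_MAX_NESTING)
		return;	/* nested too deep: keep using the previous buffer */

	ctx = this_cpu_ptr(&kmsan_irq_ctx[depth]);
	memset(ctx, 0, sizeof(*ctx));	/* new context starts fully initialized */
	this_cpu_write(kmsan_current_ctx, ctx);
}

void kmsan_context_exit(void)
{
	int depth = this_cpu_dec_return(kmsan_irq_depth);

	/* Restore the context of whatever we interrupted. */
	if (depth > 0 && depth <= KMSAN_MAX_NESTING)
		this_cpu_write(kmsan_current_ctx,
			       this_cpu_ptr(&kmsan_irq_ctx[depth - 1]));
	/* depth == 0: back on the task's own context (handled elsewhere). */
}

The assembly macros (KMSAN_INTERRUPT_ENTER, KMSAN_SOFTIRQ_ENTER, KMSAN_NMI_ENTER, ...) would just call into such C hooks at the points shown in the diff, so the enter/exit pairs bracket do_IRQ, __do_softirq, the IST handlers and do_nmi.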
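Similarly, KMSAN_UNPOISON_PT_REGS addresses a different problem: pt_regs is filled by uninstrumented assembly, so its shadow still says "uninitialized" when the C handler reads it. Conceptually the step boils down to something like the sketch below; the helper name and the unpoisoning primitive are assumptions for illustration, not necessarily the exact API of this series.

#include <asm/ptrace.h>

/* Hypothetical helper: mark the saved register frame as initialized so that
 * reads of regs->ip, regs->sp etc. in the handler do not trigger reports. */
static inline void kmsan_unpoison_pt_regs_example(struct pt_regs *regs)
{
	/* Assumes an unpoisoning primitive along the lines of
	 * kmsan_unpoison_shadow(addr, size) exists in the KMSAN patchset. */
	kmsan_unpoison_shadow(regs, sizeof(*regs));
}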