On Sat, 21 Jan 2023 21:46:06 -0500 Tianyu Lan <ltykernel@xxxxxxxxx> wrote:

> From: Ashish Kalra <ashish.kalra@xxxxxxx>
> 
> Add checks in the interrupt exit code paths for returns to user mode:
> if we are currently executing the #HV handler, don't follow the
> irqentry_exit_to_user_mode path, as that can potentially cause the #HV
> handler to be preempted and rescheduled on another CPU. An #HV handler
> rescheduled on another CPU will handle interrupts on a different CPU
> than the one they were injected on, causing invalid EOIs, missed/lost
> guest interrupts with corresponding hangs, and/or per-CPU IRQs handled
> on an unintended CPU.
> 

Why doesn't this problem happen in the #VC handler? The #VC handler
doesn't have this special handling.

> Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
> ---
>  arch/x86/include/asm/idtentry.h | 66 +++++++++++++++++++++++++++++++++
>  arch/x86/kernel/sev.c           | 30 +++++++++++++++
>  2 files changed, 96 insertions(+)
> 
> diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h
> index 652fea10d377..45b47132be7c 100644
> --- a/arch/x86/include/asm/idtentry.h
> +++ b/arch/x86/include/asm/idtentry.h
> @@ -13,6 +13,10 @@
>  
>  #include <asm/irq_stack.h>
>  
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +noinstr void irqentry_exit_hv_cond(struct pt_regs *regs, irqentry_state_t state);
> +#endif
> +
>  /**
>   * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
>   * No error code pushed by hardware
> @@ -176,6 +180,7 @@ __visible noinstr void func(struct pt_regs *regs, unsigned long error_code)
>  #define DECLARE_IDTENTRY_IRQ(vector, func)	\
>  	DECLARE_IDTENTRY_ERRORCODE(vector, func)
>  
> +#ifndef CONFIG_AMD_MEM_ENCRYPT
>  /**
>   * DEFINE_IDTENTRY_IRQ - Emit code for device interrupt IDT entry points
>   * @func:	Function name of the entry point
> @@ -205,6 +210,26 @@ __visible noinstr void func(struct pt_regs *regs,	\
>  }	\
>  	\
>  static noinline void __##func(struct pt_regs *regs, u32 vector)
> +#else
> +
> +#define DEFINE_IDTENTRY_IRQ(func)	\
> +static void __##func(struct pt_regs *regs, u32 vector);	\
> +	\
> +__visible noinstr void func(struct pt_regs *regs,	\
> +			    unsigned long error_code)	\
> +{	\
> +	irqentry_state_t state = irqentry_enter(regs);	\
> +	u32 vector = (u32)(u8)error_code;	\
> +	\
> +	instrumentation_begin();	\
> +	kvm_set_cpu_l1tf_flush_l1d();	\
> +	run_irq_on_irqstack_cond(__##func, regs, vector);	\
> +	instrumentation_end();	\
> +	irqentry_exit_hv_cond(regs, state);	\
> +}	\
> +	\
> +static noinline void __##func(struct pt_regs *regs, u32 vector)
> +#endif
>  
>  /**
>   * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
> @@ -221,6 +246,7 @@ static noinline void __##func(struct pt_regs *regs, u32 vector)
>  #define DECLARE_IDTENTRY_SYSVEC(vector, func)	\
>  	DECLARE_IDTENTRY(vector, func)
>  
> +#ifndef CONFIG_AMD_MEM_ENCRYPT
>  /**
>   * DEFINE_IDTENTRY_SYSVEC - Emit code for system vector IDT entry points
>   * @func:	Function name of the entry point
> @@ -245,6 +271,26 @@ __visible noinstr void func(struct pt_regs *regs)	\
>  }	\
>  	\
>  static noinline void __##func(struct pt_regs *regs)
> +#else
> +
> +#define DEFINE_IDTENTRY_SYSVEC(func)	\
> +static void __##func(struct pt_regs *regs);	\
> +	\
> +__visible noinstr void func(struct pt_regs *regs)	\
> +{	\
> +	irqentry_state_t state = irqentry_enter(regs);	\
> +	\
> +	instrumentation_begin();	\
> +	kvm_set_cpu_l1tf_flush_l1d();	\
> +	run_sysvec_on_irqstack_cond(__##func, regs);	\
> +	instrumentation_end();	\
> +	irqentry_exit_hv_cond(regs, state);	\
> +}	\
> +	\
> +static noinline void __##func(struct pt_regs *regs)
> +#endif
> +
> +#ifndef CONFIG_AMD_MEM_ENCRYPT
>  
>  /**
>   * DEFINE_IDTENTRY_SYSVEC_SIMPLE - Emit code for simple system vector IDT
> @@ -274,6 +320,26 @@ __visible noinstr void func(struct pt_regs *regs)	\
>  }	\
>  	\
>  static __always_inline void __##func(struct pt_regs *regs)
> +#else
> +
> +#define DEFINE_IDTENTRY_SYSVEC_SIMPLE(func)	\
> +static __always_inline void __##func(struct pt_regs *regs);	\
> +	\
> +__visible noinstr void func(struct pt_regs *regs)	\
> +{	\
> +	irqentry_state_t state = irqentry_enter(regs);	\
> +	\
> +	instrumentation_begin();	\
> +	__irq_enter_raw();	\
> +	kvm_set_cpu_l1tf_flush_l1d();	\
> +	__##func(regs);	\
> +	__irq_exit_raw();	\
> +	instrumentation_end();	\
> +	irqentry_exit_hv_cond(regs, state);	\
> +}	\
> +	\
> +static __always_inline void __##func(struct pt_regs *regs)
> +#endif
>  
>  /**
>   * DECLARE_IDTENTRY_XENCB - Declare functions for XEN HV callback entry point
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index b1a98c2a52f8..23f15e95838b 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -147,6 +147,10 @@ struct sev_hv_doorbell_page {
>  
>  struct sev_snp_runtime_data {
>  	struct sev_hv_doorbell_page hv_doorbell_page;
> +	/*
> +	 * Indication that we are currently handling #HV events.
> +	 */
> +	bool hv_handling_events;
>  };
>  
>  static DEFINE_PER_CPU(struct sev_snp_runtime_data*, snp_runtime_data);
> @@ -200,6 +204,8 @@ static void do_exc_hv(struct pt_regs *regs)
>  	union hv_pending_events pending_events;
>  	u8 vector;
>  
> +	this_cpu_read(snp_runtime_data)->hv_handling_events = true;
> +
>  	while (sev_hv_pending()) {
>  		pending_events.events = xchg(
>  			&sev_snp_current_doorbell_page()->pending_events.events,
> @@ -234,6 +240,8 @@ static void do_exc_hv(struct pt_regs *regs)
>  			common_interrupt(regs, pending_events.vector);
>  		}
>  	}
> +
> +	this_cpu_read(snp_runtime_data)->hv_handling_events = false;
>  }
>  
>  static __always_inline bool on_vc_stack(struct pt_regs *regs)
> @@ -2529,3 +2537,25 @@ static int __init snp_init_platform_device(void)
>  	return 0;
>  }
>  device_initcall(snp_init_platform_device);
> +
> +noinstr void irqentry_exit_hv_cond(struct pt_regs *regs, irqentry_state_t state)
> +{
> +	/*
> +	 * Check whether this returns to user mode, if so and if
> +	 * we are currently executing the #HV handler then we don't
> +	 * want to follow the irqentry_exit_to_user_mode path as
> +	 * that can potentially cause the #HV handler to be
> +	 * preempted and rescheduled on another CPU. Rescheduled #HV
> +	 * handler on another cpu will cause interrupts to be handled
> +	 * on a different cpu than the injected one, causing
> +	 * invalid EOIs and missed/lost guest interrupts and
> +	 * corresponding hangs and/or per-cpu IRQs handled on
> +	 * non-intended cpu.
> +	 */
> +	if (user_mode(regs) &&
> +	    this_cpu_read(snp_runtime_data)->hv_handling_events)
> +		return;
> +
> +	/* follow normal interrupt return/exit path */
> +	irqentry_exit(regs, state);
> +}
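
To make sure I'm reading the intent correctly, here is a condensed sketch
of what each converted entry point ends up doing once this patch is
applied. This is hand-simplified from the DEFINE_IDTENTRY_SYSVEC macro
quoted above; "some_sysvec"/"__some_sysvec" are placeholder names for
illustration only, not code from the patch:

/*
 * Simplified expansion of DEFINE_IDTENTRY_SYSVEC(some_sysvec) with this
 * patch applied ("some_sysvec" is a placeholder, not from the patch).
 */
static void __some_sysvec(struct pt_regs *regs);	/* the actual handler body */

__visible noinstr void some_sysvec(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	kvm_set_cpu_l1tf_flush_l1d();
	run_sysvec_on_irqstack_cond(__some_sysvec, regs);
	instrumentation_end();

	/*
	 * Stock kernel: irqentry_exit(regs, state).
	 * With this patch: irqentry_exit_hv_cond(regs, state), which
	 * returns early (skipping the exit-to-user-mode work, and with
	 * it any preemption/rescheduling) whenever the per-CPU
	 * hv_handling_events flag set by do_exc_hv() is still true.
	 */
	irqentry_exit_hv_cond(regs, state);
}

If that reading is right, my question above stands: as far as I can see
the #VC exit path has no equivalent guard, so it would help to explain
in the commit message why the same preemption/migration concern does
not apply there.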