From: Tianyu Lan <tiala@xxxxxxxxxxxxx>

Add check_hv_pending() and check_hv_pending_irq_enable() to check for
queued #HV events while IRQs are disabled.

check_hv_pending() runs on the exception return paths and handles a
queued #HV event before the iret that may re-enable interrupts.
check_hv_pending_irq_enable() handles queued #HV events at the points
where interrupts are re-enabled, i.e. native_irq_enable() and
native_safe_halt().

Signed-off-by: Tianyu Lan <tiala@xxxxxxxxxxxxx>
---
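Notes (dropped by git am, not part of the commit message): below is a
minimal userspace model of the check-on-enable pattern this patch adds.
hv_event_pending and handle_hv_event() are invented stand-ins for the
hypervisor's queued-event state and for do_exc_hv(); this is an
illustration under those assumptions, not kernel code.

#include <stdbool.h>
#include <stdio.h>

static volatile bool hv_event_pending;	/* models a hypervisor-queued #HV event */

static void handle_hv_event(void)	/* stands in for do_exc_hv() */
{
	hv_event_pending = false;
	puts("queued #HV event handled");
}

static void irq_enable(void)		/* models native_irq_enable() */
{
	/* "sti" would go here; a previously queued event must not be lost. */
	if (hv_event_pending)		/* models check_hv_pending_irq_enable() */
		handle_hv_event();	/* the real code brackets this with cli/sti */
}

int main(void)
{
	hv_event_pending = true;	/* event queued while IRQs were masked */
	irq_enable();
	return 0;
}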
 arch/x86/entry/entry_64.S       | 18 ++++++++++++++++
 arch/x86/include/asm/irqflags.h | 14 +++++++++++-
 arch/x86/kernel/sev.c           | 38 +++++++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+), 1 deletion(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 653b1f10699b..147b850babf6 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1019,6 +1019,15 @@ SYM_CODE_END(paranoid_entry)
  * R15 - old SPEC_CTRL
  */
 SYM_CODE_START_LOCAL(paranoid_exit)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/*
+	 * If a #HV was delivered during execution and interrupts were
+	 * disabled, then check if it can be handled before the iret
+	 * (which may re-enable interrupts).
+	 */
+	mov	%rsp, %rdi
+	call	check_hv_pending
+#endif
 	UNWIND_HINT_REGS
 
 	/*
@@ -1143,6 +1152,15 @@ SYM_CODE_START(error_entry)
 SYM_CODE_END(error_entry)
 
 SYM_CODE_START_LOCAL(error_return)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/*
+	 * If a #HV was delivered during execution and interrupts were
+	 * disabled, then check if it can be handled before the iret
+	 * (which may re-enable interrupts).
+	 */
+	mov	%rsp, %rdi
+	call	check_hv_pending
+#endif
 	UNWIND_HINT_REGS
 	DEBUG_ENTRY_ASSERT_IRQS_OFF
 	testb	$3, CS(%rsp)
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 8c5ae649d2df..d09ec6d76591 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -11,6 +11,10 @@
 /*
  * Interrupt control:
  */
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+void check_hv_pending(struct pt_regs *regs);
+void check_hv_pending_irq_enable(void);
+#endif
 
 /* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
 extern inline unsigned long native_save_fl(void);
@@ -40,12 +44,20 @@ static __always_inline void native_irq_disable(void)
 static __always_inline void native_irq_enable(void)
 {
 	asm volatile("sti": : :"memory");
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	check_hv_pending_irq_enable();
+#endif
 }
 
 static __always_inline void native_safe_halt(void)
 {
 	mds_idle_clear_cpu_buffers();
-	asm volatile("sti; hlt": : :"memory");
+	asm volatile("sti": : :"memory");
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	check_hv_pending_irq_enable();
+#endif
+	asm volatile("hlt": : :"memory");
 }
 
 static __always_inline void native_halt(void)
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index e25445de0957..ff5eab48bfe2 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -181,6 +181,44 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
 	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
 }
 
+static void do_exc_hv(struct pt_regs *regs)
+{
+	/* Handle #HV exception. */
+}
+
+void check_hv_pending(struct pt_regs *regs)
+{
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;
+
+	if ((regs->flags & X86_EFLAGS_IF) == 0)
+		return;
+
+	do_exc_hv(regs);
+}
+
+void check_hv_pending_irq_enable(void)
+{
+	struct pt_regs regs;
+
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;
+
+	memset(&regs, 0, sizeof(struct pt_regs));
+	asm volatile("movl %%cs, %%eax;" : "=a" (regs.cs));
+	asm volatile("movl %%ss, %%eax;" : "=a" (regs.ss));
+	regs.orig_ax = 0xffffffff;
+	regs.flags = native_save_fl();
+
+	/*
+	 * Disable interrupts while handling pending #HV events, then
+	 * re-enable them once the events have been processed.
+	 */
+	asm volatile("cli" : : : "memory");
+	do_exc_hv(&regs);
+	asm volatile("sti" : : : "memory");
+}
+
 void noinstr __sev_es_ist_exit(void)
 {
 	unsigned long ist;
-- 
2.25.1