This test covers a corner case in which KVM fails to preserve the STI interrupt shadow when an #SMI arrives during it. Because the STI interrupt shadow apparently blocks real interrupts as well — and therefore prevents a vCPU kick from making the CPU enter SMM while the shadow is active — a workaround is used: an instruction that triggers a VM exit anyway, but is retried by KVM, is placed in the interrupt shadow. While emulating such an instruction, KVM does not reset the interrupt shadow (because it retries the instruction), but it can notice the pending #SMI and enter SMM. The test verifies that the interrupt shadow is preserved in this case.

Signed-off-by: Maxim Levitsky <mlevitsk@xxxxxxxxxx>
---
 x86/Makefile.common  |   3 +-
 x86/Makefile.x86_64  |   1 +
 x86/smm_int_window.c | 118 +++++++++++++++++++++++++++++++++++++++++++
 x86/unittests.cfg    |   5 ++
 4 files changed, 126 insertions(+), 1 deletion(-)
 create mode 100644 x86/smm_int_window.c

diff --git a/x86/Makefile.common b/x86/Makefile.common
index 365e199f..698a48ab 100644
--- a/x86/Makefile.common
+++ b/x86/Makefile.common
@@ -87,7 +87,8 @@ tests-common = $(TEST_DIR)/vmexit.$(exe) $(TEST_DIR)/tsc.$(exe) \
 	$(TEST_DIR)/emulator.$(exe) \
 	$(TEST_DIR)/eventinj.$(exe) \
 	$(TEST_DIR)/smap.$(exe) \
-	$(TEST_DIR)/umip.$(exe)
+	$(TEST_DIR)/umip.$(exe) \
+	$(TEST_DIR)/smm_int_window.$(exe)
 
 # The following test cases are disabled when building EFI tests because they
 # use absolute addresses in their inline assembly code, which cannot compile
diff --git a/x86/Makefile.x86_64 b/x86/Makefile.x86_64
index f483dead..5d66b201 100644
--- a/x86/Makefile.x86_64
+++ b/x86/Makefile.x86_64
@@ -35,6 +35,7 @@ tests += $(TEST_DIR)/pks.$(exe)
 tests += $(TEST_DIR)/pmu_lbr.$(exe)
 tests += $(TEST_DIR)/pmu_pebs.$(exe)
 
+
 ifeq ($(CONFIG_EFI),y)
 tests += $(TEST_DIR)/amd_sev.$(exe)
 endif
diff --git a/x86/smm_int_window.c b/x86/smm_int_window.c
new file mode 100644
index 00000000..d3a2b073
--- /dev/null
+++ b/x86/smm_int_window.c
@@ -0,0 +1,118 @@
+#include "libcflat.h"
+#include "apic.h"
+#include
"processor.h"
+#include "smp.h"
+#include "isr.h"
+#include "asm/barrier.h"
+#include "alloc_page.h"
+#include "asm/page.h"
+
+#define SELF_INT_VECTOR 0xBB
+#define MEM_ALLOC_ORDER 16
+
+/* Incremented by the ISR if an interrupt lands on the shadowed instruction. */
+volatile int bad_int_received;
+volatile bool test_ended;
+/* Handshake flag: vCPU0 sets it, vCPU1 sends an #SMI and clears it. */
+volatile bool send_smi;
+
+extern unsigned long shadow_label;
+
+static void dummy_ipi_isr(isr_regs_t *regs)
+{
+	/*
+	 * Test that we never get the interrupt on the instruction which
+	 * is in the interrupt shadow.
+	 */
+	if (regs->rip == (unsigned long)&shadow_label)
+		bad_int_received++;
+	eoi();
+}
+
+static void vcpu1_code(void *data)
+{
+	/*
+	 * Flood vCPU0 with #SMIs.
+	 *
+	 * Note that kvm-unit-tests run with SeaBIOS, and its #SMI handler
+	 * is only installed on vCPU0 (the BSP).
+	 * Sending an #SMI to any other CPU will crash the guest.
+	 */
+	setup_vm();
+
+	while (!test_ended) {
+		if (send_smi) {
+			apic_icr_write(APIC_INT_ASSERT | APIC_DEST_PHYSICAL | APIC_DM_SMI, 0);
+			send_smi = false;
+		}
+		cpu_relax();
+	}
+}
+
+int main(void)
+{
+	int i;
+	unsigned volatile char *mem;
+
+	setup_vm();
+	cli();
+
+	/* One fresh page per iteration; DONTZERO keeps the pages untouched. */
+	mem = alloc_pages_flags(MEM_ALLOC_ORDER, AREA_ANY | FLAG_DONTZERO);
+	assert(mem);
+
+	handle_irq(SELF_INT_VECTOR, dummy_ipi_isr);
+	on_cpu_async(1, vcpu1_code, NULL);
+
+	for (i = 0 ; i < (1 << MEM_ALLOC_ORDER) && !bad_int_received ; i++) {
+
+		/* Queue a self-IPI; it must only be delivered after the shadow ends. */
+		apic_icr_write(APIC_INT_ASSERT | APIC_DEST_PHYSICAL |
+				APIC_DM_FIXED | SELF_INT_VECTOR, 0);
+
+		/* in case the sender is still sending an #SMI, wait for it */
+		while (send_smi)
+			;
+
+		/* ask the peer vCPU to send an #SMI to us */
+		send_smi = true;
+
+		/*
+		 * The memory access below should never get an interrupt because
+		 * it is in an interrupt shadow from the STI.
+		 *
+		 * Note that it seems that even if a real interrupt happens, it
+		 * will still not interrupt this instruction; thus a vCPU kick
+		 * from vCPU1, when it attempts to send an #SMI to us, is itself
+		 * not enough to trigger the switch to SMM mode at this point.
+		 *
+		 * Therefore an STI;NOP;CLI sequence by itself doesn't lead to an
+		 * #SMI happening in between these instructions.
+		 *
+		 * So instead of a NOP, use an instruction that accesses fresh
+		 * memory, which forces the CPU to #VMEXIT; just before resuming
+		 * the guest, KVM might notice the incoming #SMI and enter SMM
+		 * with a still-pending interrupt shadow.
+		 *
+		 * Also note that a plain #VMEXITing instruction like CPUID
+		 * can't be used here, because KVM itself will emulate it,
+		 * and clear the interrupt shadow, prior to entering SMM.
+		 *
+		 * Test that in this case the interrupt shadow is preserved,
+		 * which means that upon exit from the #SMI handler the
+		 * instruction should still not receive the pending interrupt.
+		 */
+
+		asm volatile(
+			"sti\n"
+			"shadow_label:\n"
+			"movl $1, %0\n"
+			"cli\n"
+			: "=m" (*(mem+i*PAGE_SIZE))
+			::
+		);
+	}
+
+	test_ended = 1;
+	while (cpus_active() > 1)
+		cpu_relax();
+
+	report(!bad_int_received, "No interrupts during the interrupt shadow");
+	return report_summary();
+}
diff --git a/x86/unittests.cfg b/x86/unittests.cfg
index f324e32d..e803ba03 100644
--- a/x86/unittests.cfg
+++ b/x86/unittests.cfg
@@ -478,3 +478,8 @@ file = cet.flat
 arch = x86_64
 smp = 2
 extra_params = -enable-kvm -m 2048 -cpu host
+
+[smm_int_window]
+file = smm_int_window.flat
+smp = 2
+extra_params = -machine smm=on -machine kernel-irqchip=on -m 2g
-- 
2.34.3