From: Joerg Roedel <jroedel@xxxxxxx>

The AP jump table under SEV-ES contains the reset vector where non-boot
CPUs start executing when coming out of reset. This means that a CPU
coming out of the AP-reset-hold VMGEXIT also needs to start executing at
the reset vector stored in the AP jump table.

The problem is to find a safe place to put the real-mode code which
executes the VMGEXIT and jumps to the reset vector. The code cannot be in
kernel memory, because after kexec that memory is owned by the new kernel
and the code might have been overwritten.

Fortunately the AP jump table itself is a safe place, because its memory
is not owned by the OS and will not be overwritten by a new kernel started
through kexec. The table is 4K in size and only the first 4 bytes are used
for the reset vector. This leaves enough space for some 16-bit code to do
the job, and even for a small stack.

The AP jump table must be 4K in size, must reside in encrypted memory and
must be 4K (page) aligned. There can only be one AP jump table, and it
should reside in memory that has been marked as reserved by UEFI.

Install 16-bit code into the AP jump table under SEV-ES after the APs have
been brought up. The code does an AP-reset-hold VMGEXIT and jumps to the
reset vector after being woken up.

Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
---
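Review notes (not part of the commit message):

For readability, here is a rough, freestanding C rendering of what the 16-bit
parking loop in ap_jump_table.S does. It is an illustrative sketch only: the
helper names (ghcb_msr_read(), ghcb_msr_write(), ap_park_loop()) are made up
for this note, while MSR_AMD64_SEV_ES_GHCB (0xc0010130) and the AP Reset Hold
request/response values (0x006/0x007) match the GHCB MSR protocol used by the
assembly. A similar sketch of the GDT descriptors installed by
sev_es_setup_ap_jump_table_data() follows after the diff.

#include <stdint.h>

#define MSR_AMD64_SEV_ES_GHCB		0xc0010130
#define GHCB_MSR_AP_RESET_HOLD_REQ	0x006ULL
#define GHCB_MSR_AP_RESET_HOLD_RESP	0x007ULL
#define GHCB_MSR_INFO_MASK		0xfffULL

/* Read the GHCB MSR (thin wrapper around the rdmsr instruction) */
static inline uint64_t ghcb_msr_read(void)
{
	uint32_t lo, hi;

	asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
			     : "c" ((uint32_t)MSR_AMD64_SEV_ES_GHCB));
	return ((uint64_t)hi << 32) | lo;
}

/* Write the GHCB MSR (thin wrapper around the wrmsr instruction) */
static inline void ghcb_msr_write(uint64_t val)
{
	asm volatile("wrmsr" : : "c" ((uint32_t)MSR_AMD64_SEV_ES_GHCB),
				 "a" ((uint32_t)val),
				 "d" ((uint32_t)(val >> 32)));
}

static void ap_park_loop(void)
{
	for (;;) {
		/* GHCBInfo = AP Reset Hold Request, GHCBData = 0 */
		ghcb_msr_write(GHCB_MSR_AP_RESET_HOLD_REQ);
		asm volatile("rep; vmmcall");	/* VMGEXIT */

		uint64_t resp = ghcb_msr_read();

		/* Only an AP Reset Hold Response is of interest */
		if ((resp & GHCB_MSR_INFO_MASK) != GHCB_MSR_AP_RESET_HOLD_RESP)
			continue;

		/* A non-zero GHCBData value means the AP was woken up */
		if (resp >> 12)
			break;
	}

	/* ...patch the reset vector into the far jump and drop to real mode... */
}
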
 arch/x86/include/asm/realmode.h         |   2 +
 arch/x86/include/asm/sev-ap-jumptable.h |  29 ++++++
 arch/x86/kernel/sev.c                   | 104 +++++++++++++++++++
 arch/x86/realmode/Makefile              |   9 +-
 arch/x86/realmode/rmpiggy.S             |   6 ++
 arch/x86/realmode/sev/Makefile          |  33 ++++++
 arch/x86/realmode/sev/ap_jump_table.S   | 131 ++++++++++++++++++++++++
 arch/x86/realmode/sev/ap_jump_table.lds |  24 +++++
 8 files changed, 337 insertions(+), 1 deletion(-)
 create mode 100644 arch/x86/include/asm/sev-ap-jumptable.h
 create mode 100644 arch/x86/realmode/sev/Makefile
 create mode 100644 arch/x86/realmode/sev/ap_jump_table.S
 create mode 100644 arch/x86/realmode/sev/ap_jump_table.lds

diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index 331474b150f1..d17f495e86cd 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -62,6 +62,8 @@ extern unsigned long initial_gs;
 extern unsigned long initial_stack;
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 extern unsigned long initial_vc_handler;
+extern unsigned char rm_ap_jump_table_blob[];
+extern unsigned char rm_ap_jump_table_blob_end[];
 #endif
 
 extern unsigned char real_mode_blob[];
diff --git a/arch/x86/include/asm/sev-ap-jumptable.h b/arch/x86/include/asm/sev-ap-jumptable.h
new file mode 100644
index 000000000000..710547999dee
--- /dev/null
+++ b/arch/x86/include/asm/sev-ap-jumptable.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AMD Encrypted Register State Support
+ *
+ * Author: Joerg Roedel <jroedel@xxxxxxx>
+ */
+#ifndef __ASM_SEV_AP_JUMPTABLE_H
+#define __ASM_SEV_AP_JUMPTABLE_H
+
+#define SEV_APJT_CS16		0x8
+#define SEV_APJT_DS16		0x10
+
+#define SEV_APJT_ENTRY		0x10
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The reset_ip and reset_cs members are fixed and defined by the GHCB
+ * specification. Do not change or move them around.
+ */
+struct sev_ap_jump_table_header {
+	u16	reset_ip;
+	u16	reset_cs;
+	u16	ap_jumptable_gdt;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_SEV_AP_JUMPTABLE_H */
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 969ef9855bb5..ea93cb58f1e3 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 
+#include <asm/sev-ap-jumptable.h>
 #include <asm/cpu_entry_area.h>
 #include <asm/stacktrace.h>
 #include <asm/sev.h>
@@ -46,6 +47,9 @@ static struct ghcb __initdata *boot_ghcb;
 /* Cached AP jump table Address */
 static phys_addr_t jump_table_pa;
 
+/* Whether the AP jump table blob was successfully installed */
+static bool sev_ap_jumptable_blob_installed __ro_after_init;
+
 /* #VC handler runtime per-CPU data */
 struct sev_es_runtime_data {
 	struct ghcb ghcb_page;
@@ -727,6 +731,106 @@ static void __init sev_es_setup_play_dead(void)
 static inline void sev_es_setup_play_dead(void) { }
 #endif
 
+/*
+ * Make the necessary runtime changes to the AP jump table blob. For now this
+ * only sets up the GDT used while the code executes. The GDT needs 16-bit
+ * code and data segments with a base that points to the AP jump table page.
+ */
+void __init sev_es_setup_ap_jump_table_data(void *base, u32 pa)
+{
+	struct sev_ap_jump_table_header *header;
+	struct desc_ptr *gdt_descr;
+	u64 *ap_jumptable_gdt;
+
+	header = base;
+
+	/*
+	 * Set up 16-bit protected mode code and data segments for the AP jump
+	 * table. Set the segment limits to 0xffff to already be compatible
+	 * with real mode.
+	 */
+	ap_jumptable_gdt = (u64 *)(base + header->ap_jumptable_gdt);
+	ap_jumptable_gdt[SEV_APJT_CS16 / 8] = GDT_ENTRY(0x9b, pa, 0xffff);
+	ap_jumptable_gdt[SEV_APJT_DS16 / 8] = GDT_ENTRY(0x93, pa, 0xffff);
+
+	/* Write the correct GDT base address into the GDT descriptor */
+	gdt_descr = (struct desc_ptr *)(base + header->ap_jumptable_gdt);
+	gdt_descr->address += pa;
+}
+
+/*
+ * Set up the AP jump table blob, which contains code that runs in 16-bit
+ * protected mode to park an AP. After the AP is woken up again, the code
+ * disables protected mode and jumps to the reset vector that is also stored
+ * in the AP jump table.
+ *
+ * The jump table is a safe place to park an AP, because it is owned by the
+ * BIOS and writable by the OS. Putting the code in kernel memory would break
+ * with kexec, because by the time the APs wake up the memory is owned by
+ * the new kernel and possibly already overwritten.
+ *
+ * Kexec is also the reason this function is an init-call after SMP bringup.
+ * Only after all CPUs are up is there a guarantee that no AP is still parked
+ * in the AP jump-table code.
+ */
+static int __init sev_setup_ap_jump_table(void)
+{
+	size_t blob_size = rm_ap_jump_table_blob_end - rm_ap_jump_table_blob;
+	u16 startup_cs, startup_ip;
+	u16 __iomem *jump_table;
+	phys_addr_t pa;
+
+	if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
+		return 0;
+
+	if (ghcb_info.vm_proto < 2) {
+		pr_warn("AP jump table parking requires at least GHCB protocol version 2\n");
+		return 0;
+	}
+
+	pa = get_jump_table_addr();
+
+	/* On UP guests there is no jump table, so this is not a failure */
+	if (!pa)
+		return 0;
+
+	/* Check overflow and size for the untrusted jump table address */
+	if (pa + PAGE_SIZE < pa || pa + PAGE_SIZE > SZ_4G) {
+		pr_info("AP jump table is above 4GB or address overflow - not enabling AP jump table parking\n");
+		return 0;
+	}
+
+	jump_table = ioremap_encrypted(pa, PAGE_SIZE);
+	if (WARN_ON(!jump_table))
+		return -EINVAL;
+
+	/*
+	 * Save the reset vector to restore it later because the blob will
+	 * overwrite it.
+	 */
+	startup_ip = jump_table[0];
+	startup_cs = jump_table[1];
+
+	/* Install the AP jump table blob with the real-mode AP parking code */
+	memcpy_toio(jump_table, rm_ap_jump_table_blob, blob_size);
+
+	/* Set up the AP jump table GDT */
+	sev_es_setup_ap_jump_table_data(jump_table, (u32)pa);
+
+	writew(startup_ip, &jump_table[0]);
+	writew(startup_cs, &jump_table[1]);
+
+	iounmap(jump_table);
+
+	pr_info("AP jump table blob successfully set up\n");
+
+	/* Mark the AP jump table blob as available */
+	sev_ap_jumptable_blob_installed = true;
+
+	return 0;
+}
+core_initcall(sev_setup_ap_jump_table);
+
 static void __init alloc_runtime_data(int cpu)
 {
 	struct sev_es_runtime_data *data;
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
index a0b491ae2de8..00f3cceb9580 100644
--- a/arch/x86/realmode/Makefile
+++ b/arch/x86/realmode/Makefile
@@ -11,12 +11,19 @@
 KASAN_SANITIZE			:= n
 KCSAN_SANITIZE			:= n
 
+RMPIGGY-y				= $(obj)/rm/realmode.bin
+RMPIGGY-$(CONFIG_AMD_MEM_ENCRYPT)	+= $(obj)/sev/ap_jump_table.bin
+
 subdir- := rm
+subdir- += sev
 
 obj-y += init.o
 obj-y += rmpiggy.o
 
-$(obj)/rmpiggy.o: $(obj)/rm/realmode.bin
+$(obj)/rmpiggy.o: $(RMPIGGY-y)
 
 $(obj)/rm/realmode.bin: FORCE
 	$(Q)$(MAKE) $(build)=$(obj)/rm $@
+
+$(obj)/sev/ap_jump_table.bin: FORCE
+	$(Q)$(MAKE) $(build)=$(obj)/sev $@
diff --git a/arch/x86/realmode/rmpiggy.S b/arch/x86/realmode/rmpiggy.S
index c8fef76743f6..a659f98617ff 100644
--- a/arch/x86/realmode/rmpiggy.S
+++ b/arch/x86/realmode/rmpiggy.S
@@ -17,3 +17,9 @@ SYM_DATA_END_LABEL(real_mode_blob, SYM_L_GLOBAL, real_mode_blob_end)
 SYM_DATA_START(real_mode_relocs)
 	.incbin "arch/x86/realmode/rm/realmode.relocs"
 SYM_DATA_END(real_mode_relocs)
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+SYM_DATA_START(rm_ap_jump_table_blob)
+	.incbin "arch/x86/realmode/sev/ap_jump_table.bin"
+SYM_DATA_END_LABEL(rm_ap_jump_table_blob, SYM_L_GLOBAL, rm_ap_jump_table_blob_end)
+#endif
diff --git a/arch/x86/realmode/sev/Makefile b/arch/x86/realmode/sev/Makefile
new file mode 100644
index 000000000000..7cf5f31f6419
--- /dev/null
+++ b/arch/x86/realmode/sev/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
+KASAN_SANITIZE			:= n
+KCSAN_SANITIZE			:= n
+OBJECT_FILES_NON_STANDARD	:= y
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT		:= n
+
+always-y := ap_jump_table.bin
+ap_jump_table-y += ap_jump_table.o
+targets += $(ap_jump_table-y)
+
+APJUMPTABLE_OBJS = $(addprefix $(obj)/,$(ap_jump_table-y))
+
+LDFLAGS_ap_jump_table.elf := -m elf_i386 -T
+
+targets += ap_jump_table.elf
+$(obj)/ap_jump_table.elf: $(obj)/ap_jump_table.lds $(APJUMPTABLE_OBJS) FORCE
+	$(call if_changed,ld)
+
+OBJCOPYFLAGS_ap_jump_table.bin := -O binary
+
+targets += ap_jump_table.bin
+$(obj)/ap_jump_table.bin: $(obj)/ap_jump_table.elf FORCE
+	$(call if_changed,objcopy)
+
+# ---------------------------------------------------------------------------
+
+KBUILD_AFLAGS	:= $(REALMODE_CFLAGS) -D__ASSEMBLY__
+GCOV_PROFILE	:= n
+UBSAN_SANITIZE	:= n
diff --git a/arch/x86/realmode/sev/ap_jump_table.S b/arch/x86/realmode/sev/ap_jump_table.S
new file mode 100644
index 000000000000..5eaa115d1cb6
--- /dev/null
+++ b/arch/x86/realmode/sev/ap_jump_table.S
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/linkage.h>
+#include <asm/msr-index.h>
+#include <asm/sev-ap-jumptable.h>
+
+/*
+ * This file contains the source code for the binary blob which gets copied to
+ * the SEV-ES AP jump table to park APs while offlining CPUs or booting a new
+ * kernel via kexec.
+ *
+ * The AP jump table is the only safe place to put this code, as any memory the
+ * kernel allocates will be owned (and possibly overwritten) by the new kernel
+ * once the APs are woken up.
+ *
+ * This code runs in 16-bit protected mode; the CS, DS, and SS segment bases
+ * are set to the beginning of the AP jump table page.
+ *
+ * Since the GDT will also be gone when the AP wakes up, this blob contains its
+ * own GDT, which is set up by the AP jump table setup code with the correct
+ * offsets.
+ *
+ * Author: Joerg Roedel <jroedel@xxxxxxx>
+ */
+
+	.text
+	.org 0x0
+	.code16
+SYM_DATA_START(ap_jumptable_header)
+	.word	0			/* reset IP */
+	.word	0			/* reset CS */
+	.word	ap_jumptable_gdt	/* GDT offset */
+SYM_DATA_END(ap_jumptable_header)
+
+	.org SEV_APJT_ENTRY
+SYM_CODE_START(ap_park)
+
+	/* Switch to the AP jump table GDT first */
+	lgdtl	ap_jumptable_gdt
+
+	/* Reload CS */
+	ljmpw	$SEV_APJT_CS16, $1f
+1:
+
+	/* Reload DS and SS */
+	movl	$SEV_APJT_DS16, %ecx
+	movl	%ecx, %ds
+	movl	%ecx, %ss
+
+	/*
+	 * Set up a stack pointing to the end of the AP jump table page.
+	 * The stack is needed to reset EFLAGS after wakeup.
+	 */
+	movl	$0x1000, %esp
+
+	/* Execute the AP reset hold VMGEXIT via the GHCB MSR protocol */
+2:	xorl	%edx, %edx
+	movl	$0x6, %eax		/* AP Reset Hold Request */
+	movl	$MSR_AMD64_SEV_ES_GHCB, %ecx
+	wrmsr
+	rep; vmmcall			/* VMGEXIT */
+	rdmsr
+	movl	%eax, %ecx
+	andl	$0xfff, %ecx
+	cmpl	$0x7, %ecx		/* AP Reset Hold Response? */
+	jne	2b
+	shrl	$12, %eax		/* GHCBData (low part) */
+	jnz	3f			/* Non-zero GHCBData - woken up */
+	testl	%edx, %edx		/* GHCBData (high part) */
+	jnz	3f
+	jmp	2b			/* Spurious wakeup - park again */
+3:
+	/*
+	 * Successfully woken up: patch the correct target into the far jump at
+	 * the end. An indirect far jump cannot be used here, because at the
+	 * time the jump is executed DS is already loaded with real-mode values.
+ */
+
+	/* The jump target is at address 0x0 - copy it into the far jump */
+	movl	$0, %ecx
+	movl	(%ecx), %eax
+	movl	%eax, jump_target
+
+	/* Set EFLAGS to its reset value (bit 1 is hard-wired to 1) */
+	pushl	$2
+	popfl
+
+	/* Set up DS and SS for real mode */
+	movl	$0x18, %ecx
+	movl	%ecx, %ds
+	movl	%ecx, %ss
+
+	/* Reset the remaining registers */
+	movl	$0, %esp
+	movl	$0, %eax
+	movl	$0, %ebx
+	movl	$0, %edx
+
+	/* Set CR0 to its reset value to drop out of protected mode */
+	movl	$0x60000010, %ecx
+	movl	%ecx, %cr0
+
+	/*
+	 * The bytes below form a far-jump instruction which jumps to the reset
+	 * vector configured in the AP jump table, and thereby back to real
+	 * mode. An indirect jump would be cleaner, but requires a working DS
+	 * base and limit. DS is already loaded with real-mode values at this
+	 * point, so a direct far jump with the target patched in earlier is used.
+	 */
+	.byte 0xea
+SYM_DATA_LOCAL(jump_target, .long 0)
+
+SYM_CODE_END(ap_park)
+	/* Here comes the GDT */
+	.balign	16
+SYM_DATA_START_LOCAL(ap_jumptable_gdt)
+	/* Offset 0x0 - GDT descriptor */
+	.word	ap_jumptable_gdt_end - ap_jumptable_gdt - 1
+	.long	ap_jumptable_gdt
+	.word	0
+
+	/* Offset 0x8 - 16-bit code segment - set up at boot */
+	.quad	0
+
+	/* Offset 0x10 - 16-bit data segment - set up at boot */
+	.quad	0
+
+	/* Offset 0x18 - real-mode data segment */
+	.long	0xffff0180
+	.long	0x00009300
+SYM_DATA_END_LABEL(ap_jumptable_gdt, SYM_L_LOCAL, ap_jumptable_gdt_end)
diff --git a/arch/x86/realmode/sev/ap_jump_table.lds b/arch/x86/realmode/sev/ap_jump_table.lds
new file mode 100644
index 000000000000..4e47f1a6eb4e
--- /dev/null
+++ b/arch/x86/realmode/sev/ap_jump_table.lds
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ap_jump_table.lds
+ *
+ * Linker script for the SEV-ES AP jump table code
+ */
+
+OUTPUT_FORMAT("elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(ap_park)
+
+SECTIONS
+{
+	. = 0;
+	.text : {
+		*(.text)
+		*(.text.*)
+	}
+
+	/DISCARD/ : {
+		*(.note*)
+		*(.debug*)
+	}
+}
-- 
2.34.1
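
Appendix (for review only, not part of the patch): a small user-space sketch of
the two descriptors that sev_es_setup_ap_jump_table_data() writes into the AP
jump table GDT. The gdt_entry() helper re-implements the packing done by the
kernel's GDT_ENTRY() macro, and the 0x9f000 base address is an arbitrary
example value.

#include <stdio.h>
#include <stdint.h>

/* Pack a GDT descriptor the same way as GDT_ENTRY(flags, base, limit) */
static uint64_t gdt_entry(uint64_t flags, uint64_t base, uint64_t limit)
{
	return ((base  & 0xff000000ULL) << (56 - 24)) |
	       ((flags & 0x0000f0ffULL) << 40)        |
	       ((limit & 0x000f0000ULL) << (48 - 16)) |
	       ((base  & 0x00ffffffULL) << 16)        |
	        (limit & 0x0000ffffULL);
}

int main(void)
{
	uint32_t pa = 0x9f000;	/* hypothetical AP jump table address */

	/* 16-bit code segment: access byte 0x9b, base = jump table, limit 0xffff */
	printf("CS16: 0x%016llx\n",
	       (unsigned long long)gdt_entry(0x9b, pa, 0xffff));

	/* 16-bit data segment: access byte 0x93, base = jump table, limit 0xffff */
	printf("DS16: 0x%016llx\n",
	       (unsigned long long)gdt_entry(0x93, pa, 0xffff));

	return 0;
}

Both descriptors use byte granularity and a 64KiB limit, so the segments behave
like real-mode segments with their base moved to the jump table page, which is
what lets the blob run in 16-bit protected mode before dropping back to real
mode.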