On Wed, 17 Jun 2020 at 00:06, Ard Biesheuvel <ardb@xxxxxxxxxx> wrote:
>
> On Tue, 16 Jun 2020 at 21:48, Arvind Sankar <nivedita@xxxxxxxxxxxx> wrote:
> >
> > Commit
> > 17054f492dfd ("efi/x86: Implement mixed mode boot without the handover protocol")
> > introduced a new entry point for the EFI stub to be booted in mixed mode
> > on 32-bit firmware.
> >
> > When entered via efi32_pe_entry, control is first transferred to
> > startup_32 to set up for the switch to long mode, and then the EFI stub
> > proper is entered via efi_pe_entry. efi_pe_entry is an MS ABI function,
> > and the ABI requires 32 bytes of shadow stack space to be allocated by
> > the caller, as well as the stack being aligned to 8 mod 16 on entry.
> >
> > Allocate 40 bytes on the stack before switching to 64-bit mode when
> > calling efi_pe_entry to account for this.
> >
> > Signed-off-by: Arvind Sankar <nivedita@xxxxxxxxxxxx>
>
> Queued as a fix, thanks.
>

Shouldn't boot_stack_end be 16-byte aligned for this to work reliably?
This seems to work out in practice, as .bss is cacheline aligned, and
the heap and stack happen to be emitted first, but it would be better
to make this explicit.

> > ---
> >  arch/x86/boot/compressed/head_64.S | 10 +++++++++-
> >  1 file changed, 9 insertions(+), 1 deletion(-)
> >
> > diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
> > index e821a7d7d5c4..d073e3c919dd 100644
> > --- a/arch/x86/boot/compressed/head_64.S
> > +++ b/arch/x86/boot/compressed/head_64.S
> > @@ -213,7 +213,6 @@ SYM_FUNC_START(startup_32)
> >  	 * We place all of the values on our mini stack so lret can
> >  	 * used to perform that far jump.
> >  	 */
> > -	pushl	$__KERNEL_CS
> >  	leal	startup_64(%ebp), %eax
> >  #ifdef CONFIG_EFI_MIXED
> >  	movl	efi32_boot_args(%ebp), %edi
> > @@ -224,11 +223,20 @@ SYM_FUNC_START(startup_32)
> >  	movl	efi32_boot_args+8(%ebp), %edx	// saved bootparams pointer
> >  	cmpl	$0, %edx
> >  	jnz	1f
> > +	/*
> > +	 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
> > +	 * shadow space on the stack even if all arguments are passed in
> > +	 * registers. We also need an additional 8 bytes for the space that
> > +	 * would be occupied by the return address, and this also results in
> > +	 * the correct stack alignment for entry.
> > +	 */
> > +	subl	$40, %esp
> >  	leal	efi_pe_entry(%ebp), %eax
> >  	movl	%edi, %ecx			// MS calling convention
> >  	movl	%esi, %edx
> > 1:
> >  #endif
> > +	pushl	$__KERNEL_CS
> >  	pushl	%eax
> >
> >  	/* Enter paged protected Mode, activating Long Mode */
> > --
> > 2.26.2
> >
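
For reference, the 40 bytes mirror what an ordinary MS ABI call site in
64-bit code would set up. A minimal sketch, for illustration only and
not part of the patch:

	/* RSP is 16-byte aligned here, as the MS x64 ABI requires */
	subq	$32, %rsp		/* caller-allocated shadow space */
	call	efi_pe_entry		/* pushes an 8-byte return address,
					 * so RSP == 8 (mod 16) on entry */
	addq	$32, %rsp		/* release the shadow space */

Since this path reaches efi_pe_entry via lret rather than call, the
subl $40, %esp reserves the 32-byte shadow area plus the 8-byte slot a
call would have pushed, which also yields the 8 mod 16 entry alignment,
provided the stack base itself is 16-byte aligned.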
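
As for making that explicit: boot_stack/boot_stack_end are defined at
the bottom of head_64.S (not quoted above, and the existing directive
is .balign 4, IIRC). One way might be to bump the .bss alignment there,
assuming BOOT_HEAP_SIZE and BOOT_STACK_SIZE remain multiples of 16; a
sketch, not a tested patch:

	.bss
	.balign 16			/* was .balign 4 */
SYM_DATA_LOCAL(boot_heap,	.fill BOOT_HEAP_SIZE, 1, 0)

SYM_DATA_START_LOCAL(boot_stack)
	.fill BOOT_STACK_SIZE, 1, 0	/* a multiple of 16 keeps the end
					 * label 16-byte aligned too */
SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)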