4.4-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>

This reverts commit b73adb60852034d84092d123b323196ca42529cd which is
commit c9f2a9a65e4855b74d92cdad688f6ee4a1a323ff upstream.

Turns out there were too many other issues with this patch to make it
viable for the stable tree.

Reported-by: Ben Hutchings <ben.hutchings@xxxxxxxxxxxxxxx>
Cc: Matt Fleming <matt@xxxxxxxxxxxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxxxxxx>
Cc: Andy Lutomirski <luto@xxxxxxxxxx>
Cc: Ard Biesheuvel <ard.biesheuvel@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Dave Jones <davej@xxxxxxxxxxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Sai Praneeth Prakhya <sai.praneeth.prakhya@xxxxxxxxx>
Cc: Stephen Smalley <sds@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Toshi Kani <toshi.kani@xxxxxx>
Cc: linux-efi@xxxxxxxxxxxxxxx
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "Ghannam, Yazen" <Yazen.Ghannam@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/efi.h          |   25 --------------------
 arch/x86/platform/efi/efi_64.c      |   24 ++++++++++----------
 arch/x86/platform/efi/efi_stub_64.S |   43 ++++++++++++++++++++++++++++++++++++
 3 files changed, 56 insertions(+), 36 deletions(-)

--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -3,7 +3,6 @@
 
 #include <asm/fpu/api.h>
 #include <asm/pgtable.h>
-#include <asm/tlb.h>
 
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
@@ -65,17 +64,6 @@ extern u64 asmlinkage efi_call(void *fp,
 
 #define efi_call_phys(f, args...)	efi_call((f), args)
 
-/*
- * Scratch space used for switching the pagetable in the EFI stub
- */
-struct efi_scratch {
-	u64 r15;
-	u64 prev_cr3;
-	pgd_t *efi_pgt;
-	bool use_pgd;
-	u64 phys_stack;
-} __packed;
-
 #define efi_call_virt(f, ...)						\
 ({									\
 	efi_status_t __s;						\
@@ -83,20 +71,7 @@ struct efi_scratch {
 	efi_sync_low_kernel_mappings();					\
 	preempt_disable();						\
 	__kernel_fpu_begin();						\
-									\
-	if (efi_scratch.use_pgd) {					\
-		efi_scratch.prev_cr3 = read_cr3();			\
-		write_cr3((unsigned long)efi_scratch.efi_pgt);		\
-		__flush_tlb_all();					\
-	}								\
-									\
 	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
-									\
-	if (efi_scratch.use_pgd) {					\
-		write_cr3(efi_scratch.prev_cr3);			\
-		__flush_tlb_all();					\
-	}								\
-									\
 	__kernel_fpu_end();						\
 	preempt_enable();						\
 	__s;								\
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -47,7 +47,16 @@
  */
 static u64 efi_va = EFI_VA_START;
 
-struct efi_scratch efi_scratch;
+/*
+ * Scratch space used for switching the pagetable in the EFI stub
+ */
+struct efi_scratch {
+	u64 r15;
+	u64 prev_cr3;
+	pgd_t *efi_pgt;
+	bool use_pgd;
+	u64 phys_stack;
+} __packed;
 
 static void __init early_code_mapping_set_exec(int executable)
 {
@@ -74,11 +83,8 @@ pgd_t * __init efi_call_phys_prolog(void
 	int pgd;
 	int n_pgds;
 
-	if (!efi_enabled(EFI_OLD_MEMMAP)) {
-		save_pgd = (pgd_t *)read_cr3();
-		write_cr3((unsigned long)efi_scratch.efi_pgt);
-		goto out;
-	}
+	if (!efi_enabled(EFI_OLD_MEMMAP))
+		return NULL;
 
 	early_code_mapping_set_exec(1);
 
@@ -90,7 +96,6 @@ pgd_t * __init efi_call_phys_prolog(void
 		vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
 		set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
 	}
-out:
 	__flush_tlb_all();
 
 	return save_pgd;
@@ -104,11 +109,8 @@ void __init efi_call_phys_epilog(pgd_t *
 	int pgd_idx;
 	int nr_pgds;
 
-	if (!efi_enabled(EFI_OLD_MEMMAP)) {
-		write_cr3((unsigned long)save_pgd);
-		__flush_tlb_all();
+	if (!save_pgd)
 		return;
-	}
 
 	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
 
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -38,6 +38,41 @@
 	mov %rsi, %cr0;			\
 	mov (%rsp), %rsp
 
+	/* stolen from gcc */
+	.macro FLUSH_TLB_ALL
+	movq %r15, efi_scratch(%rip)
+	movq %r14, efi_scratch+8(%rip)
+	movq %cr4, %r15
+	movq %r15, %r14
+	andb $0x7f, %r14b
+	movq %r14, %cr4
+	movq %r15, %cr4
+	movq efi_scratch+8(%rip), %r14
+	movq efi_scratch(%rip), %r15
+	.endm
+
+	.macro SWITCH_PGT
+	cmpb $0, efi_scratch+24(%rip)
+	je 1f
+	movq %r15, efi_scratch(%rip)		# r15
+	# save previous CR3
+	movq %cr3, %r15
+	movq %r15, efi_scratch+8(%rip)	# prev_cr3
+	movq efi_scratch+16(%rip), %r15	# EFI pgt
+	movq %r15, %cr3
+	1:
+	.endm
+
+	.macro RESTORE_PGT
+	cmpb $0, efi_scratch+24(%rip)
+	je 2f
+	movq efi_scratch+8(%rip), %r15
+	movq %r15, %cr3
+	movq efi_scratch(%rip), %r15
+	FLUSH_TLB_ALL
+	2:
+	.endm
+
 ENTRY(efi_call)
 	SAVE_XMM
 	mov (%rsp), %rax
@@ -48,8 +83,16 @@ ENTRY(efi_call)
 	mov %r8, %r9
 	mov %rcx, %r8
 	mov %rsi, %rcx
+	SWITCH_PGT
 	call *%rdi
+	RESTORE_PGT
 	addq $48, %rsp
 	RESTORE_XMM
 	ret
 ENDPROC(efi_call)
+
+	.data
+ENTRY(efi_scratch)
+	.fill 3,8,0
+	.byte 0
+	.quad 0