This is a note to let you know that I've just added the patch titled

    x86/entry/64: Move the IST stacks into struct cpu_entry_area

to the 4.14-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
    x86-entry-64-move-the-ist-stacks-into-struct-cpu_entry_area.patch
and it can be found in the queue-4.14 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From 40e7f949e0d9a33968ebde5d67f7e3a47c97742a Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@xxxxxxxxxx>
Date: Mon, 4 Dec 2017 15:07:26 +0100
Subject: x86/entry/64: Move the IST stacks into struct cpu_entry_area

From: Andy Lutomirski <luto@xxxxxxxxxx>

commit 40e7f949e0d9a33968ebde5d67f7e3a47c97742a upstream.

The IST stacks are needed when an IST exception occurs and are accessed
before any kernel code at all runs. Move them into struct cpu_entry_area.

The IST stacks are unlike the rest of cpu_entry_area: they're used even for
entries from kernel mode. This means that they should be set up before we
load the final IDT. Move cpu_entry_area setup to trap_init() for the boot
CPU and set it up for all possible CPUs at once in native_smp_prepare_cpus().

Signed-off-by: Andy Lutomirski <luto@xxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Reviewed-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Reviewed-by: Borislav Petkov <bp@xxxxxxx>
Cc: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Borislav Petkov <bpetkov@xxxxxxx>
Cc: Brian Gerst <brgerst@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxx>
Cc: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Cc: David Laight <David.Laight@xxxxxxxxxx>
Cc: Denys Vlasenko <dvlasenk@xxxxxxxxxx>
Cc: Eduardo Valentin <eduval@xxxxxxxxxx>
Cc: Greg KH <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: H. Peter Anvin <hpa@xxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Juergen Gross <jgross@xxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Rik van Riel <riel@xxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: aliguori@xxxxxxxxxx
Cc: daniel.gruss@xxxxxxxxxxxxxx
Cc: hughd@xxxxxxxxxx
Cc: keescook@xxxxxxxxxx
Link: https://lkml.kernel.org/r/20171204150606.480598743@xxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/x86/include/asm/fixmap.h |   12 ++++++
 arch/x86/kernel/cpu/common.c  |   74 +++++++++++++++++++++++-------------------
 arch/x86/kernel/traps.c       |    3 +
 3 files changed, 57 insertions(+), 32 deletions(-)

--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -63,10 +63,22 @@ struct cpu_entry_area {
         struct tss_struct tss;
 
         char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+        /*
+         * Exception stacks used for IST entries.
+         *
+         * In the future, this should have a separate slot for each stack
+         * with guard pages between them.
+         */
+        char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
 };
 
 #define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
 
+extern void setup_cpu_entry_areas(void);
+
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -466,24 +466,36 @@ void load_percpu_segment(int cpu)
         load_stack_canary_segment();
 }
 
-static void set_percpu_fixmap_pages(int fixmap_index, void *ptr,
-                                    int pages, pgprot_t prot)
-{
-        int i;
-
-        for (i = 0; i < pages; i++) {
-                __set_fixmap(fixmap_index - i,
-                             per_cpu_ptr_to_phys(ptr + i * PAGE_SIZE), prot);
-        }
-}
-
 #ifdef CONFIG_X86_32
 /* The 32-bit entry code needs to find cpu_entry_area. */
 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 #endif
 
+#ifdef CONFIG_X86_64
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+        [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+        [DEBUG_STACK - 1]              = DEBUG_STKSZ
+};
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+static void __init
+set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
+{
+        for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
+                __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
+}
+
 /* Setup the fixmap mappings only once per-processor */
-static inline void setup_cpu_entry_area(int cpu)
+static void __init setup_cpu_entry_area(int cpu)
 {
 #ifdef CONFIG_X86_64
         extern char _entry_trampoline[];
@@ -532,15 +544,31 @@ static inline void setup_cpu_entry_area(
                              PAGE_KERNEL);
 
 #ifdef CONFIG_X86_32
-        this_cpu_write(cpu_entry_area, get_cpu_entry_area(cpu));
+        per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
 #endif
 
 #ifdef CONFIG_X86_64
+        BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+        BUILD_BUG_ON(sizeof(exception_stacks) !=
+                     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+        set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
+                                &per_cpu(exception_stacks, cpu),
+                                sizeof(exception_stacks) / PAGE_SIZE,
+                                PAGE_KERNEL);
+
         __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
                      __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
 #endif
 }
 
+void __init setup_cpu_entry_areas(void)
+{
+        unsigned int cpu;
+
+        for_each_possible_cpu(cpu)
+                setup_cpu_entry_area(cpu);
+}
+
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
 {
@@ -1385,20 +1413,6 @@ DEFINE_PER_CPU(unsigned int, irq_count)
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-        [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-        [DEBUG_STACK - 1]              = DEBUG_STKSZ
-};
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
@@ -1607,7 +1621,7 @@ void cpu_init(void)
          * set up and load the per-CPU TSS
          */
         if (!oist->ist[0]) {
-                char *estacks = per_cpu(exception_stacks, cpu);
+                char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
 
                 for (v = 0; v < N_EXCEPTION_STACKS; v++) {
                         estacks += exception_stack_sizes[v];
@@ -1633,8 +1647,6 @@ void cpu_init(void)
         initialize_tlbstate_and_flush();
         enter_lazy_tlb(&init_mm, me);
 
-        setup_cpu_entry_area(cpu);
-
         /*
          * Initialize the TSS. sp0 points to the entry trampoline stack
          * regardless of what task is running.
@@ -1694,8 +1706,6 @@ void cpu_init(void)
         initialize_tlbstate_and_flush();
         enter_lazy_tlb(&init_mm, curr);
 
-        setup_cpu_entry_area(cpu);
-
         /*
          * Initialize the TSS. Don't bother initializing sp0, as the initial
          * task never enters user mode.
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -947,6 +947,9 @@ dotraplinkage void do_iret_error(struct
 
 void __init trap_init(void)
 {
+        /* Init cpu_entry_area before IST entries are set up */
+        setup_cpu_entry_areas();
+
         idt_setup_traps();
 
         /*


Patches currently in stable-queue which might be from luto@xxxxxxxxxx are

queue-4.14/x86-entry-64-simplify-reg-restore-code-in-the-standard-iret-paths.patch
queue-4.14/x86-cpufeatures-fix-various-details-in-the-feature-definitions.patch
queue-4.14/x86-entry-64-move-the-ist-stacks-into-struct-cpu_entry_area.patch
queue-4.14/x86-dumpstack-add-get_stack_info-support-for-the-sysenter-stack.patch
queue-4.14/x86-asm-don-t-use-the-confusing-.ifeq-directive.patch
queue-4.14/selftests-x86-ldt_gdt-add-infrastructure-to-test-set_thread_area.patch
queue-4.14/x86-entry-remap-the-tss-into-the-cpu-entry-area.patch
queue-4.14/x86-entry-64-paravirt-use-paravirt-safe-macro-to-access-eflags.patch
queue-4.14/x86-mm-fixmap-generalize-the-gdt-fixmap-mechanism-introduce-struct-cpu_entry_area.patch
queue-4.14/x86-paravirt-dont-patch-flush_tlb_single.patch
queue-4.14/x86-dumpstack-handle-stack-overflow-on-all-stacks.patch
queue-4.14/x86-entry-64-use-pop-instead-of-movq-in-syscall_return_via_sysret.patch
queue-4.14/x86-entry-64-return-to-userspace-from-the-trampoline-stack.patch
queue-4.14/x86-paravirt-provide-a-way-to-check-for-hypervisors.patch
queue-4.14/xen-x86-entry-64-add-xen-nmi-trap-entry.patch
queue-4.14/x86-entry-64-remove-the-restore_c_regs_and_iret-label.patch
queue-4.14/x86-entry-64-create-a-per-cpu-syscall-entry-trampoline.patch
queue-4.14/x86-entry-64-remove-the-restore_..._regs-infrastructure.patch
queue-4.14/x86-xen-64-x86-entry-64-clean-up-sp-code-in-cpu_initialize_context.patch
queue-4.14/x86-entry-add-task_top_of_stack-to-find-the-top-of-a-task-s-stack.patch
queue-4.14/x86-entry-64-remove-all-remaining-direct-thread_struct-sp0-reads.patch
queue-4.14/x86-boot-relocate-definition-of-the-initial-state-of-cr0.patch
queue-4.14/x86-entry-64-stop-initializing-tss.sp0-at-boot.patch
queue-4.14/x86-entry-64-de-xen-ify-our-nmi-code.patch
queue-4.14/objtool-don-t-report-end-of-section-error-after-an-empty-unwind-hint.patch
queue-4.14/x86-entry-64-pass-sp0-directly-to-load_sp0.patch
queue-4.14/x86-entry-64-use-a-per-cpu-trampoline-stack-for-idt-entries.patch
queue-4.14/x86-entry-64-move-swapgs-into-the-common-iret-to-usermode-path.patch
queue-4.14/x86-entry-64-shorten-test-instructions.patch
queue-4.14/x86-cpufeature-add-user-mode-instruction-prevention-definitions.patch
queue-4.14/x86-cpufeatures-make-cpu-bugs-sticky.patch
queue-4.14/x86-espfix-64-stop-assuming-that-pt_regs-is-on-the-entry-stack.patch
queue-4.14/x86-traps-use-a-new-on_thread_stack-helper-to-clean-up-an-assertion.patch
queue-4.14/x86-xen-fix-xen-head-elf-annotations.patch
queue-4.14/x86-entry-64-remove-thread_struct-sp0.patch
queue-4.14/x86-entry-move-sysenter_stack-to-the-beginning-of-struct-tss_struct.patch
queue-4.14/x86-entry-64-allocate-and-enable-the-sysenter-stack.patch
queue-4.14/x86-unwinder-orc-dont-bail-on-stack-overflow.patch
queue-4.14/x86-head-fix-head-elf-function-annotations.patch
queue-4.14/selftests-x86-ldt_gdt-run-most-existing-ldt-test-cases-against-the-gdt-as-well.patch
queue-4.14/x86-entry-32-pull-the-msr_ia32_sysenter_cs-update-code-out-of-native_load_sp0.patch
queue-4.14/x86-entry-64-split-the-iret-to-user-and-iret-to-kernel-paths.patch
queue-4.14/x86-entry-64-shrink-paranoid_exit_restore-and-make-labels-local.patch
queue-4.14/x86-head-add-unwind-hint-annotations.patch
queue-4.14/x86-head-remove-unused-bad_address-code.patch
queue-4.14/x86-xen-add-unwind-hint-annotations.patch
queue-4.14/x86-kasan-64-teach-kasan-about-the-cpu_entry_area.patch
queue-4.14/x86-head-remove-confusing-comment.patch
queue-4.14/x86-entry-64-remove-the-sysenter-stack-canary.patch
queue-4.14/x86-mm-kasan-don-t-use-vmemmap_populate-to-initialize-shadow.patch
queue-4.14/x86-entry-gdt-put-per-cpu-gdt-remaps-in-ascending-order.patch
queue-4.14/x86-entry-fix-assumptions-that-the-hw-tss-is-at-the-beginning-of-cpu_tss.patch
queue-4.14/x86-cpufeatures-re-tabulate-the-x86_feature-definitions.patch
queue-4.14/x86-entry-32-fix-cpu_current_top_of_stack-initialization-at-boot.patch
queue-4.14/ptrace-x86-make-user_64bit_mode-available-to-32-bit-builds.patch
queue-4.14/x86-entry-64-make-cpu_entry_area.tss-read-only.patch
queue-4.14/x86-mm-relocate-page-fault-error-codes-to-traps.h.patch
queue-4.14/x86-unwinder-handle-stack-overflows-more-gracefully.patch
queue-4.14/x86-entry-64-use-pop-instead-of-mov-to-restore-regs-on-nmi-return.patch
queue-4.14/x86-irq-64-print-the-offending-ip-in-the-stack-overflow-warning.patch
queue-4.14/x86-entry-clean-up-the-sysenter_stack-code.patch
queue-4.14/x86-entry-64-merge-the-fast-and-slow-sysret-paths.patch
queue-4.14/x86-entry-64-separate-cpu_current_top_of_stack-from-tss.sp0.patch
queue-4.14/x86-boot-annotate-verify_cpu-as-a-callable-function.patch
queue-4.14/x86-irq-remove-an-old-outdated-comment-about-context-tracking-races.patch