Use broadcast TLB invalidation, via the INVLPGB instruction, on AMD EPYC 3
and newer CPUs.

To avoid exhausting PCID space, and to keep TLB flushes local for single
threaded processes, only hand out broadcast ASIDs to processes active on
3 or more CPUs, and gradually raise that threshold as broadcast ASID space
is depleted.

Signed-off-by: Rik van Riel <riel@xxxxxxxxxxx>
---
 arch/x86/include/asm/mmu.h         |   6 +
 arch/x86/include/asm/mmu_context.h |  12 ++
 arch/x86/include/asm/tlbflush.h    |  15 ++
 arch/x86/mm/tlb.c                  | 313 ++++++++++++++++++++++++++++-
 4 files changed, 337 insertions(+), 9 deletions(-)

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 3b496cdcb74b..a8e8dfa5a520 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -48,6 +48,12 @@ typedef struct {
 	unsigned long flags;
 #endif
 
+#ifdef CONFIG_CPU_SUP_AMD
+	struct list_head broadcast_asid_list;
+	u16 broadcast_asid;
+	bool asid_transition;
+#endif
+
 #ifdef CONFIG_ADDRESS_MASKING
 	/* Active LAM mode: X86_CR3_LAM_U48 or X86_CR3_LAM_U57 or 0 (disabled) */
 	unsigned long lam_cr3_mask;
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 795fdd53bd0a..0dc446c427d2 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -139,6 +139,8 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
 #define enter_lazy_tlb enter_lazy_tlb
 extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 
+extern void destroy_context_free_broadcast_asid(struct mm_struct *mm);
+
 /*
  * Init a new mm. Used on mm copies, like at fork()
  * and on mm's that are brand-new, like at execve().
@@ -161,6 +163,13 @@ static inline int init_new_context(struct task_struct *tsk,
 		mm->context.execute_only_pkey = -1;
 	}
 #endif
+
+#ifdef CONFIG_CPU_SUP_AMD
+	INIT_LIST_HEAD(&mm->context.broadcast_asid_list);
+	mm->context.broadcast_asid = 0;
+	mm->context.asid_transition = false;
+#endif
+
 	mm_reset_untag_mask(mm);
 	init_new_context_ldt(mm);
 	return 0;
@@ -170,6 +179,9 @@ static inline int init_new_context(struct task_struct *tsk,
 static inline void destroy_context(struct mm_struct *mm)
 {
 	destroy_context_ldt(mm);
+#ifdef CONFIG_CPU_SUP_AMD
+	destroy_context_free_broadcast_asid(mm);
+#endif
 }
 
 extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 20074f17fbcd..074f46b74b92 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -65,6 +65,21 @@ static inline void cr4_clear_bits(unsigned long mask)
  */
 #define TLB_NR_DYN_ASIDS	6
 
+#ifdef CONFIG_CPU_SUP_AMD
+#define is_dyn_asid(asid)		((asid) < TLB_NR_DYN_ASIDS)
+#define is_broadcast_asid(asid)		((asid) >= TLB_NR_DYN_ASIDS)
+#define in_asid_transition(info)	((info)->mm && (info)->mm->context.asid_transition)
+#else
+#define is_dyn_asid(asid)		true
+#define is_broadcast_asid(asid)		false
+#define in_asid_transition(info)	false
+
+static inline bool needs_broadcast_asid_reload(struct mm_struct *next, u16 prev_asid)
+{
+	return false;
+}
+#endif
+
 struct tlb_context {
 	u64 ctx_id;
 	u64 tlb_gen;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 64f1679c37e1..29a64f8c4c94 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -74,13 +74,15 @@
  * use different names for each of them:
  *
  * ASID  - [0, TLB_NR_DYN_ASIDS-1]
- *         the canonical identifier for an mm
+ *         the canonical identifier for an mm, dynamically allocated on each CPU
+ *         [TLB_NR_DYN_ASIDS, MAX_ASID_AVAILABLE-1]
+ *         the canonical, global identifier for an mm, identical across all CPUs
  *
- * kPCID - [1, TLB_NR_DYN_ASIDS]
+ * kPCID - [1, MAX_ASID_AVAILABLE]
  *         the value we write into the PCID part of CR3; corresponds to the
  *         ASID+1, because PCID 0 is special.
  *
- * uPCID - [2048 + 1, 2048 + TLB_NR_DYN_ASIDS]
+ * uPCID - [2048 + 1, 2048 + MAX_ASID_AVAILABLE]
  *         for KPTI each mm has two address spaces and thus needs two
  *         PCID values, but we can still do with a single ASID denomination
  *         for each mm. Corresponds to kPCID + 2048.
@@ -225,6 +227,18 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 		return;
 	}
 
+	/*
+	 * TLB consistency for this ASID is maintained with INVLPGB;
+	 * TLB flushes happen even while the process isn't running.
+	 */
+#ifdef CONFIG_CPU_SUP_AMD
+	if (static_cpu_has(X86_FEATURE_INVLPGB) && next->context.broadcast_asid) {
+		*new_asid = next->context.broadcast_asid;
+		*need_flush = false;
+		return;
+	}
+#endif
+
 	if (this_cpu_read(cpu_tlbstate.invalidate_other))
 		clear_asid_other();
 
@@ -251,6 +265,248 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 	*need_flush = true;
 }
 
+#ifdef CONFIG_CPU_SUP_AMD
+/*
+ * Logic for AMD INVLPGB support.
+ */
+static DEFINE_RAW_SPINLOCK(broadcast_asid_lock);
+static u16 last_broadcast_asid = TLB_NR_DYN_ASIDS;
+static DECLARE_BITMAP(broadcast_asid_used, MAX_ASID_AVAILABLE) = { 0 };
+static LIST_HEAD(broadcast_asid_list);
+static int broadcast_asid_available = MAX_ASID_AVAILABLE - TLB_NR_DYN_ASIDS - 1;
+
+static void reset_broadcast_asid_space(void)
+{
+	mm_context_t *context;
+
+	lockdep_assert_held(&broadcast_asid_lock);
+
+	/*
+	 * Flush once when we wrap around the ASID space, so we won't need
+	 * to flush every time we allocate an ASID for broadcast flushing.
+	 */
+	invlpgb_flush_all_nonglobals();
+	tlbsync();
+
+	/*
+	 * Leave the currently used broadcast ASIDs set in the bitmap, since
+	 * those cannot be reused before the next wraparound and flush.
+	 */
+	bitmap_clear(broadcast_asid_used, 0, MAX_ASID_AVAILABLE);
+	list_for_each_entry(context, &broadcast_asid_list, broadcast_asid_list)
+		__set_bit(context->broadcast_asid, broadcast_asid_used);
+
+	last_broadcast_asid = TLB_NR_DYN_ASIDS;
+}
+
+static u16 get_broadcast_asid(void)
+{
+	lockdep_assert_held(&broadcast_asid_lock);
+
+	do {
+		u16 start = last_broadcast_asid;
+		u16 asid = find_next_zero_bit(broadcast_asid_used, MAX_ASID_AVAILABLE, start);
+
+		if (asid >= MAX_ASID_AVAILABLE) {
+			reset_broadcast_asid_space();
+			continue;
+		}
+
+		/* Try claiming this broadcast ASID. */
+		if (!test_and_set_bit(asid, broadcast_asid_used)) {
+			last_broadcast_asid = asid;
+			return asid;
+		}
+	} while (1);
+}
+
+/*
+ * Returns true if the mm is transitioning from a CPU-local ASID to a broadcast
+ * (INVLPGB) ASID, or the other way around.
+ */
+static bool needs_broadcast_asid_reload(struct mm_struct *next, u16 prev_asid)
+{
+	u16 broadcast_asid = next->context.broadcast_asid;
+
+	if (broadcast_asid && prev_asid != broadcast_asid)
+		return true;
+
+	if (!broadcast_asid && is_broadcast_asid(prev_asid))
+		return true;
+
+	return false;
+}
+
+void destroy_context_free_broadcast_asid(struct mm_struct *mm)
+{
+	if (!mm->context.broadcast_asid)
+		return;
+
+	guard(raw_spinlock_irqsave)(&broadcast_asid_lock);
+	mm->context.broadcast_asid = 0;
+	list_del(&mm->context.broadcast_asid_list);
+	broadcast_asid_available++;
+}
+
+static int mm_active_cpus(struct mm_struct *mm)
+{
+	int count = 0;
+	int cpu;
+
+	for_each_cpu(cpu, mm_cpumask(mm)) {
+		/* Skip the CPUs that aren't really running this process. */
+		if (per_cpu(cpu_tlbstate.loaded_mm, cpu) != mm)
+			continue;
+
+		if (per_cpu(cpu_tlbstate_shared.is_lazy, cpu))
+			continue;
+
+		count++;
+	}
+	return count;
+}
+
+/*
+ * Assign a broadcast ASID to the current process, protecting against
+ * races between multiple threads in the process.
+ */
+static void use_broadcast_asid(struct mm_struct *mm)
+{
+	guard(raw_spinlock_irqsave)(&broadcast_asid_lock);
+
+	/* This process is already using broadcast TLB invalidation. */
+	if (mm->context.broadcast_asid)
+		return;
+
+	mm->context.broadcast_asid = get_broadcast_asid();
+	mm->context.asid_transition = true;
+	list_add(&mm->context.broadcast_asid_list, &broadcast_asid_list);
+	broadcast_asid_available--;
+}
+
+/*
+ * Figure out whether to assign a broadcast (global) ASID to a process.
+ * We vary the threshold by how empty or full broadcast ASID space is.
+ * 1/4 full: >= 4 active threads
+ * 1/2 full: >= 8 active threads
+ * 3/4 full: >= 16 active threads
+ * 7/8 full: >= 32 active threads
+ * etc.
+ *
+ * This way we should never exhaust the broadcast ASID space, even on very
+ * large systems, and the processes with the largest number of active
+ * threads should be able to use broadcast TLB invalidation.
+ */
+#define HALFFULL_THRESHOLD 8
+static bool meets_broadcast_asid_threshold(struct mm_struct *mm)
+{
+	int avail = broadcast_asid_available;
+	int threshold = HALFFULL_THRESHOLD;
+	int mm_active_threads;
+
+	if (!avail)
+		return false;
+
+	mm_active_threads = mm_active_cpus(mm);
+
+	/* Small processes can just use IPI TLB flushing. */
+	if (mm_active_threads < 3)
+		return false;
+
+	if (avail > MAX_ASID_AVAILABLE * 3 / 4) {
+		threshold = HALFFULL_THRESHOLD / 4;
+	} else if (avail > MAX_ASID_AVAILABLE / 2) {
+		threshold = HALFFULL_THRESHOLD / 2;
+	} else if (avail < MAX_ASID_AVAILABLE / 3) {
+		do {
+			avail *= 2;
+			threshold *= 2;
+		} while ((avail + threshold) < MAX_ASID_AVAILABLE / 2);
+	}
+
+	return mm_active_threads > threshold;
+}
+
+static void count_tlb_flush(struct mm_struct *mm)
+{
+	if (!static_cpu_has(X86_FEATURE_INVLPGB))
+		return;
+
+	/* Check every once in a while. */
+	if ((current->pid & 0x1f) != (jiffies & 0x1f))
+		return;
+
+	if (meets_broadcast_asid_threshold(mm))
+		use_broadcast_asid(mm);
+}
+
+static void finish_asid_transition(struct flush_tlb_info *info)
+{
+	struct mm_struct *mm = info->mm;
+	int bc_asid = mm->context.broadcast_asid;
+	int cpu;
+
+	if (!mm->context.asid_transition)
+		return;
+
+	for_each_cpu(cpu, mm_cpumask(mm)) {
+		if (READ_ONCE(per_cpu(cpu_tlbstate.loaded_mm, cpu)) != mm)
+			continue;
+
+		/*
+		 * If at least one CPU is not using the broadcast ASID yet,
+		 * send a TLB flush IPI. The IPI should cause stragglers
+		 * to transition soon.
+		 */
+		if (per_cpu(cpu_tlbstate.loaded_mm_asid, cpu) != bc_asid) {
+			flush_tlb_multi(mm_cpumask(info->mm), info);
+			return;
+		}
+	}
+
+	/* All the CPUs running this process are using the broadcast ASID. */
+	mm->context.asid_transition = false;
+}
+
+static void broadcast_tlb_flush(struct flush_tlb_info *info)
+{
+	bool pmd = info->stride_shift == PMD_SHIFT;
+	unsigned long maxnr = invlpgb_count_max;
+	unsigned long asid = info->mm->context.broadcast_asid;
+	unsigned long addr = info->start;
+	unsigned long nr;
+
+	/* Flushing multiple pages at once is not supported with 1GB pages. */
+	if (info->stride_shift > PMD_SHIFT)
+		maxnr = 1;
+
+	if (info->end == TLB_FLUSH_ALL) {
+		invlpgb_flush_single_pcid(kern_pcid(asid));
+		/* Do any CPUs supporting INVLPGB need PTI? */
+		if (static_cpu_has(X86_FEATURE_PTI))
+			invlpgb_flush_single_pcid(user_pcid(asid));
+	} else do {
+		/*
+		 * Calculate how many pages can be flushed at once; if the
+		 * remainder of the range is less than one page, flush one.
+		 */
+		nr = min(maxnr, (info->end - addr) >> info->stride_shift);
+		nr = max(nr, 1);
+
+		invlpgb_flush_user_nr(kern_pcid(asid), addr, nr, pmd);
+		/* Do any CPUs supporting INVLPGB need PTI? */
+		if (static_cpu_has(X86_FEATURE_PTI))
+			invlpgb_flush_user_nr(user_pcid(asid), addr, nr, pmd);
+		addr += nr << info->stride_shift;
+	} while (addr < info->end);
+
+	finish_asid_transition(info);
+
+	/* Wait for the INVLPGBs kicked off above to finish. */
+	tlbsync();
+}
+#endif /* CONFIG_CPU_SUP_AMD */
+
 /*
  * Given an ASID, flush the corresponding user ASID. We can delay this
  * until the next time we switch to it.
@@ -556,8 +812,9 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 	 */
 	if (prev == next) {
 		/* Not actually switching mm's */
-		VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
-			   next->context.ctx_id);
+		if (is_dyn_asid(prev_asid))
+			VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+				   next->context.ctx_id);
 
 		/*
 		 * If this races with another thread that enables lam, 'new_lam'
@@ -573,6 +830,23 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
			 !cpumask_test_cpu(cpu, mm_cpumask(next))))
 			cpumask_set_cpu(cpu, mm_cpumask(next));
 
+		/*
+		 * Check if the current mm is transitioning to a new ASID.
+		 */
+		if (needs_broadcast_asid_reload(next, prev_asid)) {
+			next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+
+			choose_new_asid(next, next_tlb_gen, &new_asid, &need_flush);
+			goto reload_tlb;
+		}
+
+		/*
+		 * Broadcast TLB invalidation keeps this PCID up to date
+		 * all the time.
+		 */
+		if (is_broadcast_asid(prev_asid))
+			return;
+
 		/*
 		 * If the CPU is not in lazy TLB mode, we are just switching
 		 * from one thread in a process to another thread in the same
@@ -626,8 +900,10 @@ void switch_mm_irqs_off(struct mm_struct *unused, struct mm_struct *next,
 		barrier();
 	}
 
+reload_tlb:
 	new_lam = mm_lam_cr3_mask(next);
 	if (need_flush) {
+		VM_BUG_ON(is_broadcast_asid(new_asid));
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].ctx_id, next->context.ctx_id);
 		this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen);
 		load_new_mm_cr3(next->pgd, new_asid, new_lam, true);
@@ -746,7 +1022,7 @@ static void flush_tlb_func(void *info)
 	const struct flush_tlb_info *f = info;
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
 	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+	u64 local_tlb_gen;
 	bool local = smp_processor_id() == f->initiating_cpu;
 	unsigned long nr_invalidate = 0;
 	u64 mm_tlb_gen;
@@ -769,6 +1045,16 @@ static void flush_tlb_func(void *info)
 	if (unlikely(loaded_mm == &init_mm))
 		return;
 
+	/* Reload the ASID if transitioning into or out of a broadcast ASID */
+	if (needs_broadcast_asid_reload(loaded_mm, loaded_mm_asid)) {
+		switch_mm_irqs_off(NULL, loaded_mm, NULL);
+		loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	}
+
+	/* Broadcast ASIDs are always kept up to date with INVLPGB. */
+	if (is_broadcast_asid(loaded_mm_asid))
+		return;
+
 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
 		   loaded_mm->context.ctx_id);
 
@@ -786,6 +1072,8 @@ static void flush_tlb_func(void *info)
 		return;
 	}
 
+	local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+
 	if (unlikely(f->new_tlb_gen != TLB_GENERATION_INVALID &&
 		     f->new_tlb_gen <= local_tlb_gen)) {
 		/*
@@ -953,7 +1241,7 @@ STATIC_NOPV void native_flush_tlb_multi(const struct cpumask *cpumask,
 	 * up on the new contents of what used to be page tables, while
 	 * doing a speculative memory access.
 	 */
-	if (info->freed_tables)
+	if (info->freed_tables || in_asid_transition(info))
 		on_each_cpu_mask(cpumask, flush_tlb_func, (void *)info, true);
 	else
 		on_each_cpu_cond_mask(should_flush_tlb, flush_tlb_func,
@@ -1026,14 +1314,18 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
				bool freed_tables)
 {
 	struct flush_tlb_info *info;
+	unsigned long threshold = tlb_single_page_flush_ceiling;
 	u64 new_tlb_gen;
 	int cpu;
 
+	if (static_cpu_has(X86_FEATURE_INVLPGB))
+		threshold *= invlpgb_count_max;
+
 	cpu = get_cpu();
 
 	/* Should we flush just the requested range? */
 	if ((end == TLB_FLUSH_ALL) ||
-	    ((end - start) >> stride_shift) > tlb_single_page_flush_ceiling) {
+	    ((end - start) >> stride_shift) > threshold) {
 		start = 0;
 		end = TLB_FLUSH_ALL;
 	}
@@ -1049,9 +1341,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	 * a local TLB flush is needed. Optimize this use-case by calling
 	 * flush_tlb_func_local() directly in this case.
 	 */
-	if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
+	if (IS_ENABLED(CONFIG_CPU_SUP_AMD) && mm->context.broadcast_asid) {
+		broadcast_tlb_flush(info);
+	} else if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids) {
 		info->trim_cpumask = should_trim_cpumask(mm);
 		flush_tlb_multi(mm_cpumask(mm), info);
+		count_tlb_flush(mm);
 	} else if (mm == this_cpu_read(cpu_tlbstate.loaded_mm)) {
 		lockdep_assert_irqs_enabled();
 		local_irq_disable();
-- 
2.47.1
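
For reviewers who want to see how the hand-out heuristic behaves as broadcast
ASID space fills up, below is a small standalone userspace sketch of the logic
in meets_broadcast_asid_threshold(). It is illustrative only, not kernel code:
MAX_ASID_AVAILABLE is hardcoded to 2046 here as an assumed value (the real
constant is derived from the CR3 PCID bit split in the x86 headers), and the
meets_threshold() helper is local to the sketch.

/*
 * Userspace sketch of the broadcast-ASID hand-out heuristic.
 * Not kernel code; constants below are assumptions for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_ASID_AVAILABLE	2046	/* assumed value, for illustration */
#define HALFFULL_THRESHOLD	8

/*
 * Mirror of meets_broadcast_asid_threshold(): how many active CPUs a
 * process needs before it gets a broadcast ASID, given how many
 * broadcast ASIDs are still free.
 */
static bool meets_threshold(int avail, int active_cpus)
{
	int threshold = HALFFULL_THRESHOLD;

	if (!avail)
		return false;

	/* Small processes can just use IPI TLB flushing. */
	if (active_cpus < 3)
		return false;

	if (avail > MAX_ASID_AVAILABLE * 3 / 4) {
		threshold = HALFFULL_THRESHOLD / 4;
	} else if (avail > MAX_ASID_AVAILABLE / 2) {
		threshold = HALFFULL_THRESHOLD / 2;
	} else if (avail < MAX_ASID_AVAILABLE / 3) {
		/* Each halving of the free space doubles the required CPU count. */
		do {
			avail *= 2;
			threshold *= 2;
		} while ((avail + threshold) < MAX_ASID_AVAILABLE / 2);
	}

	return active_cpus > threshold;
}

int main(void)
{
	int avail;

	/* Print the smallest CPU count that qualifies at various fill levels. */
	for (avail = MAX_ASID_AVAILABLE; avail > 0; avail /= 2) {
		int cpus;

		for (cpus = 1; cpus <= 4096; cpus++) {
			if (meets_threshold(avail, cpus)) {
				printf("%4d ASIDs free: hand out at >= %d active CPUs\n",
				       avail, cpus);
				break;
			}
		}
	}
	return 0;
}

Running the sketch shows the required number of active CPUs roughly doubling
each time the pool of free broadcast ASIDs halves, which is the behaviour
described in the comment block above meets_broadcast_asid_threshold().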