On Fri, 2025-01-10 at 13:34 -0600, Tom Lendacky wrote: > > > +++ b/arch/x86/kernel/cpu/amd.c > > @@ -1143,6 +1143,14 @@ static void cpu_detect_tlb_amd(struct > > cpuinfo_x86 *c) > > > > /* Max number of pages INVLPGB can invalidate in one shot > > */ > > invlpgb_count_max = (edx & 0xffff) + 1; > > + > > + /* If supported, enable translation cache extensions (TCE) > > */ > > + cpuid(0x80000001, &eax, &ebx, &ecx, &edx); > > + if (ecx & BIT(17)) { > > Back to my comment from patch #4, you can put this under the > cpu_feature_enabled() check and just set it. > Ohhh nice, so I can just add a CPUID feature bit for TCE, and then have this? if (cpu_feature_enabled(X86_FEATURE_TCE)) msr_set_bit(MSR_EFER, EFER_TCE); That is much nicer. Is this the right location for that code, or do I need to move it somewhere else to guarantee TCE gets enabled on every CPU? > > + u64 msr = native_read_msr(MSR_EFER);; > > + msr |= BIT(15); > > + wrmsrl(MSR_EFER, msr); > > msr_set_bit() ? > > Thanks, > Tom > > > + } > > } > > > > static const struct cpu_dev amd_cpu_dev = { > > diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c > > index 454a370494d3..585d0731ca9f 100644 > > --- a/arch/x86/mm/tlb.c > > +++ b/arch/x86/mm/tlb.c > > @@ -477,7 +477,7 @@ static void broadcast_tlb_flush(struct > > flush_tlb_info *info) > > if (info->stride_shift > PMD_SHIFT) > > maxnr = 1; > > > > - if (info->end == TLB_FLUSH_ALL) { > > + if (info->end == TLB_FLUSH_ALL || info->freed_tables) { > > invlpgb_flush_single_pcid(kern_pcid(asid)); > > /* Do any CPUs supporting INVLPGB need PTI? */ > > if (static_cpu_has(X86_FEATURE_PTI)) > > @@ -1110,7 +1110,7 @@ static void flush_tlb_func(void *info) > > * > > * The only question is whether to do a full or partial > > flush. > > * > > - * We do a partial flush if requested and two extra > > conditions > > + * We do a partial flush if requested and three extra > > conditions > > * are met: > > * > > * 1. f->new_tlb_gen == local_tlb_gen + 1. 
We have an > > invariant that > > @@ -1137,10 +1137,14 @@ static void flush_tlb_func(void *info) > > * date. By doing a full flush instead, we can > > increase > > * local_tlb_gen all the way to mm_tlb_gen and we can > > probably > > * avoid another flush in the very near future. > > + * > > + * 3. No page tables were freed. If page tables were > > freed, a full > > + * flush ensures intermediate translations in the TLB > > get flushed. > > */ > > if (f->end != TLB_FLUSH_ALL && > > f->new_tlb_gen == local_tlb_gen + 1 && > > - f->new_tlb_gen == mm_tlb_gen) { > > + f->new_tlb_gen == mm_tlb_gen && > > + !f->freed_tables) { > > /* Partial flush */ > > unsigned long addr = f->start; > > > -- All Rights Reversed.