Currently the tlb_remove_table_smp_sync IPI is sent to all CPUs
indiscriminately. This causes unnecessary work and delays that are
noticeable in real-time use cases and on isolated CPUs.

Limit the IPI to CPUs that are referencing the affected mm and are
currently in kernel space.

Signed-off-by: Yair Podemsky <ypodemsk@xxxxxxxxxx>
Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
---
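Note for reviewers: cpu_in_kernel() assumes that the low CT_STATE_MASK
bits of the per-CPU context-tracking state encode CONTEXT_KERNEL as 0,
which is my reading of include/linux/context_tracking_state.h. The
stand-alone sketch below reproduces just that bit test with locally
defined stand-in values (the enum, the mask and state_is_kernel() are
illustrative names, not the real kernel symbols), so the check can be
sanity-checked outside the kernel:

/*
 * Illustrative user-space sketch of the cpu_in_kernel() bit test.
 * The values below are local stand-ins modelled on the assumed
 * encoding in include/linux/context_tracking_state.h; they are not
 * the kernel headers themselves.
 */
#include <stdbool.h>
#include <stdio.h>

enum ctx_state {
	CONTEXT_KERNEL = 0,
	CONTEXT_IDLE   = 1,
	CONTEXT_USER   = 2,
	CONTEXT_GUEST  = 3,
	CONTEXT_MAX    = 4,
};

#define CT_STATE_MASK (CONTEXT_MAX - 1)	/* low bits hold the context */

/* Mirrors the patch's condition: true only when the low bits say "kernel". */
static bool state_is_kernel(int state)
{
	return !(state & CT_STATE_MASK);
}

int main(void)
{
	/* High bits (e.g. a dynticks counter) must not change the verdict. */
	int samples[] = { CONTEXT_KERNEL, CONTEXT_USER, CONTEXT_GUEST,
			  CONTEXT_KERNEL | 0x10, CONTEXT_USER | 0x10 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("state=0x%02x in_kernel=%d\n",
		       samples[i], state_is_kernel(samples[i]));
	return 0;
}

With that encoding, CPUs whose state reports user, guest or idle context
are skipped, so the IPI only reaches CPUs in mm_cpumask(mm) that are
currently executing in the kernel.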
 include/asm-generic/tlb.h |  4 ++--
 mm/khugepaged.c           |  4 ++--
 mm/mmu_gather.c           | 20 +++++++++++++++-----
 3 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b46617207c93..0b6ba17cc8d3 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -222,7 +222,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 #define tlb_needs_table_invalidate() (true)
 #endif
 
-void tlb_remove_table_sync_one(void);
+void tlb_remove_table_sync_one(struct mm_struct *mm);
 
 #else
 
@@ -230,7 +230,7 @@ void tlb_remove_table_sync_one(void);
 #error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
 #endif
 
-static inline void tlb_remove_table_sync_one(void) { }
+static inline void tlb_remove_table_sync_one(struct mm_struct *mm) { }
 
 #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 5cb401aa2b9d..86a82c0ac41f 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1051,7 +1051,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	_pmd = pmdp_collapse_flush(vma, address, pmd);
 	spin_unlock(pmd_ptl);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_one(mm);
 
 	spin_lock(pte_ptl);
 	result = __collapse_huge_page_isolate(vma, address, pte, cc,
@@ -1408,7 +1408,7 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
 				addr + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 	pmd = pmdp_collapse_flush(vma, addr, pmdp);
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_one(mm);
 	mmu_notifier_invalidate_range_end(&range);
 	mm_dec_nr_ptes(mm);
 	page_table_check_pte_clear_range(mm, addr, pmd);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index 2b93cf6ac9ae..3b267600a5e9 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -9,6 +9,7 @@
 #include <linux/smp.h>
 #include <linux/swap.h>
 #include <linux/rmap.h>
+#include <linux/context_tracking_state.h>
 
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
@@ -191,7 +192,15 @@ static void tlb_remove_table_smp_sync(void *arg)
 	/* Simply deliver the interrupt */
 }
 
-void tlb_remove_table_sync_one(void)
+static bool cpu_in_kernel(int cpu, void *info)
+{
+	struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu);
+	int state = atomic_read(&ct->state);
+	/* Will return true only for CPUs in kernel space */
+	return !(state & CT_STATE_MASK);
+}
+
+void tlb_remove_table_sync_one(struct mm_struct *mm)
 {
 	/*
 	 * This isn't an RCU grace period and hence the page-tables cannot be
@@ -200,7 +209,8 @@ void tlb_remove_table_sync_one(void)
 	 * It is however sufficient for software page-table walkers that rely on
 	 * IRQ disabling.
 	 */
-	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	on_each_cpu_cond_mask(cpu_in_kernel, tlb_remove_table_smp_sync,
+			      NULL, true, mm_cpumask(mm));
 }
 
 static void tlb_remove_table_rcu(struct rcu_head *head)
@@ -237,9 +247,9 @@ static inline void tlb_table_invalidate(struct mmu_gather *tlb)
 	}
 }
 
-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_one(struct mm_struct *mm, void *table)
 {
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_one(mm);
 	__tlb_remove_table(table);
 }
 
@@ -262,7 +272,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 		if (*batch == NULL) {
 			tlb_table_invalidate(tlb);
-			tlb_remove_table_one(table);
+			tlb_remove_table_one(tlb->mm, table);
 			return;
 		}
 		(*batch)->nr = 0;
-- 
2.31.1