Currently the tlb_remove_table_smp_sync IPI is sent to all CPUs
indiscriminately. This causes unnecessary work and delays, which are
notable in real-time use-cases and on isolated CPUs.

Limit the IPI on systems with ARCH_HAS_CPUMASK_BITS, where it is only
sent to the CPUs referencing the affected mm.

Signed-off-by: Yair Podemsky <ypodemsk@xxxxxxxxxx>
Suggested-by: David Hildenbrand <david@xxxxxxxxxx>
---
 include/asm-generic/tlb.h |  4 ++--
 mm/khugepaged.c           |  4 ++--
 mm/mmu_gather.c           | 17 ++++++++++++-----
 3 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b46617207c93..0b6ba17cc8d3 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -222,7 +222,7 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 #define tlb_needs_table_invalidate() (true)
 #endif

-void tlb_remove_table_sync_one(void);
+void tlb_remove_table_sync_one(struct mm_struct *mm);

 #else

@@ -230,7 +230,7 @@ void tlb_remove_table_sync_one(void);
 #error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
 #endif

-static inline void tlb_remove_table_sync_one(void) { }
+static inline void tlb_remove_table_sync_one(struct mm_struct *mm) { }

 #endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 6b9d39d65b73..3e5cb079d268 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1166,7 +1166,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
 	_pmd = pmdp_collapse_flush(vma, address, pmd);
 	spin_unlock(pmd_ptl);
 	mmu_notifier_invalidate_range_end(&range);
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_one(mm);

 	spin_lock(pte_ptl);
 	result = __collapse_huge_page_isolate(vma, address, pte, cc,
@@ -1525,7 +1525,7 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
 				addr + HPAGE_PMD_SIZE);
 	mmu_notifier_invalidate_range_start(&range);
 	pmd = pmdp_collapse_flush(vma, addr, pmdp);
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_one(mm);
 	mmu_notifier_invalidate_range_end(&range);
 	mm_dec_nr_ptes(mm);
 	page_table_check_pte_clear_range(mm, addr, pmd);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index ea9683e12936..692d8175a88e 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -191,7 +191,13 @@ static void tlb_remove_table_smp_sync(void *arg)
 	/* Simply deliver the interrupt */
 }

-void tlb_remove_table_sync_one(void)
+#ifdef CONFIG_ARCH_HAS_CPUMASK_BITS
+#define REMOVE_TABLE_IPI_MASK mm_cpumask(mm)
+#else
+#define REMOVE_TABLE_IPI_MASK cpu_online_mask
+#endif /* CONFIG_ARCH_HAS_CPUMASK_BITS */
+
+void tlb_remove_table_sync_one(struct mm_struct *mm)
 {
 	/*
 	 * This isn't an RCU grace period and hence the page-tables cannot be
@@ -200,7 +206,8 @@ void tlb_remove_table_sync_one(void)
 	 * It is however sufficient for software page-table walkers that rely on
 	 * IRQ disabling.
 	 */
-	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	on_each_cpu_mask(REMOVE_TABLE_IPI_MASK, tlb_remove_table_smp_sync,
+			 NULL, true);
 }

 static void tlb_remove_table_rcu(struct rcu_head *head)
@@ -237,9 +244,9 @@ static inline void tlb_table_invalidate(struct mmu_gather *tlb)
 	}
 }

-static void tlb_remove_table_one(void *table)
+static void tlb_remove_table_one(struct mm_struct *mm, void *table)
 {
-	tlb_remove_table_sync_one();
+	tlb_remove_table_sync_one(mm);
 	__tlb_remove_table(table);
 }

@@ -262,7 +269,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
 		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
 		if (*batch == NULL) {
 			tlb_table_invalidate(tlb);
-			tlb_remove_table_one(table);
+			tlb_remove_table_one(tlb->mm, table);
 			return;
 		}
 		(*batch)->nr = 0;
---
v2: define REMOVE_TABLE_IPI_MASK as cpu_online_mask when
    CONFIG_ARCH_HAS_CPUMASK_BITS is not set
-- 
2.39.3
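
Editor's note, not part of the patch: the reason mm_cpumask(mm) is a safe
target set here is that architectures maintaining it set a CPU's bit before
that CPU starts using the mm's page tables, so the mask over-approximates
the CPUs that may be walking them. A simplified, hypothetical sketch of that
context-switch bookkeeping (example_switch_mm() is illustrative only, not the
actual arch code, which also handles ASIDs, lazy TLB, etc.):

/*
 * Illustrative sketch: keep mm_cpumask() populated so that
 * tlb_remove_table_sync_one(mm) only needs to IPI these CPUs.
 */
static void example_switch_mm(struct mm_struct *prev, struct mm_struct *next,
			      struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Mark this CPU as a user of 'next' before its page tables can be
	 * walked here; mm_cpumask(next) then covers every CPU that the
	 * table-free synchronization IPI must reach.
	 */
	cpumask_set_cpu(cpu, mm_cpumask(next));

	/* ... arch-specific page-table / ASID switch ... */
}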