[RFC PATCH 10/16] mm/madvise: let madvise_dontneed_single_vma() caller batch tlb flushes

Update madvise_dontneed_single_vma() so that the caller can pass in an
mmu_gather object that is initialized and finished outside the function,
for batched tlb flushes.  Also adjust the internals of
madvise_dontneed_single_vma() to support such usage: when a valid
mmu_gather object is received, skip the initialization and finishing of
a self-allocated one.
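
For reference, a minimal caller-side sketch of the intended batched
usage, assuming the standard tlb_gather_mmu()/tlb_finish_mmu() pairing
(the surrounding loop and the range variable names are illustrative,
not taken from this series):

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	/* zap one or more ranges, deferring the flush */
	madvise_dontneed_single_vma(&tlb, vma, range_start, range_end);
	/* ... further ranges ... */
	tlb_finish_mmu(&tlb);	/* one flush covers all zapped ranges */

Passing NULL keeps the current per-call flushing behavior via
zap_page_range_single().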

Signed-off-by: SeongJae Park <sj@xxxxxxxxxx>
---
 mm/internal.h |  3 +++
 mm/madvise.c  | 10 +++++++---
 mm/memory.c   |  6 +++---
 3 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/mm/internal.h b/mm/internal.h
index 0413822ee34b..45b8d08cd176 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -438,6 +438,9 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     struct vm_area_struct *vma,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
+void unmap_vma_single(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		      unsigned long addr, unsigned long size,
+		      struct zap_details *details);
 int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio,
 			   gfp_t gfp);
 
diff --git a/mm/madvise.c b/mm/madvise.c
index 73a4323197e2..22da6699613c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -848,7 +848,8 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
  * An interface that causes the system to free clean pages and flush
  * dirty pages is already available as msync(MS_INVALIDATE).
  */
-static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
+static long madvise_dontneed_single_vma(struct mmu_gather *tlb,
+					struct vm_area_struct *vma,
 					unsigned long start, unsigned long end)
 {
 	struct zap_details details = {
@@ -856,7 +857,10 @@ static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
 		.even_cows = true,
 	};
 
-	zap_page_range_single(vma, start, end - start, &details);
+	if (!tlb)
+		zap_page_range_single(vma, start, end - start, &details);
+	else
+		unmap_vma_single(tlb, vma, start, end - start, &details);
 	return 0;
 }
 
@@ -951,7 +955,7 @@ static long madvise_dontneed_free(struct vm_area_struct *vma,
 	}
 
 	if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
-		return madvise_dontneed_single_vma(vma, start, end);
+		return madvise_dontneed_single_vma(NULL, vma, start, end);
 	else if (behavior == MADV_FREE)
 		return madvise_free_single_vma(vma, start, end);
 	else
diff --git a/mm/memory.c b/mm/memory.c
index aadb2844c701..ba011c064936 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2011,9 +2011,9 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 	mmu_notifier_invalidate_range_end(&range);
 }
 
-static void unmap_vma_single(struct mmu_gather *tlb,
-		struct vm_area_struct *vma, unsigned long address,
-		unsigned long size, struct zap_details *details)
+void unmap_vma_single(struct mmu_gather *tlb, struct vm_area_struct *vma,
+		      unsigned long address, unsigned long size,
+		      struct zap_details *details)
 {
 	const unsigned long end = address + size;
 	struct mmu_notifier_range range;
-- 
2.39.5



