[PATCH v6 11/16] mm/hugetlb: Introduce remap_huge_page_pmd_vmemmap helper

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



The __free_huge_page_pmd_vmemmap and __remap_huge_page_pmd_vmemmap
functions contain almost the same code. So introduce a common
remap_huge_page_pmd_vmemmap helper to simplify the code.

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 mm/hugetlb_vmemmap.c | 87 +++++++++++++++++++++-------------------------------
 1 file changed, 35 insertions(+), 52 deletions(-)

diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index d6a1b06c1322..509ca451e232 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -127,6 +127,10 @@
 	(__boundary - 1 < (end) - 1) ? __boundary : (end);		 \
 })
 
+typedef void (*vmemmap_pte_remap_func_t)(struct page *reuse, pte_t *ptep,
+					 unsigned long start, unsigned long end,
+					 void *priv);
+
 static inline unsigned int vmemmap_pages_per_hpage(struct hstate *h)
 {
 	return free_vmemmap_pages_per_hpage(h) + RESERVE_VMEMMAP_NR;
@@ -162,21 +166,42 @@ static pmd_t *vmemmap_to_pmd(unsigned long page)
 	return pmd_offset(pud, page);
 }
 
+static void remap_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
+					unsigned long end,
+					vmemmap_pte_remap_func_t fn, void *priv)
+{
+	unsigned long next, addr = start;
+	struct page *reuse = NULL;
+
+	do {
+		pte_t *ptep;
+
+		ptep = pte_offset_kernel(pmd, addr);
+		if (!reuse)
+			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
+
+		next = vmemmap_hpage_addr_end(addr, end);
+		fn(reuse, ptep, addr, next, priv);
+	} while (pmd++, addr = next, addr != end);
+
+	flush_tlb_kernel_range(start, end);
+}
+
 static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 					  unsigned long start,
-					  unsigned long end,
-					  struct list_head *remap_pages)
+					  unsigned long end, void *priv)
 {
 	pgprot_t pgprot = PAGE_KERNEL;
 	void *from = page_to_virt(reuse);
 	unsigned long addr;
+	struct list_head *pages = priv;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		void *to;
 		struct page *page;
 		pte_t entry, old = *ptep;
 
-		page = list_first_entry(remap_pages, struct page, lru);
+		page = list_first_entry(pages, struct page, lru);
 		list_del(&page->lru);
 		to = page_to_virt(page);
 		copy_page(to, from);
@@ -196,28 +221,6 @@ static void __remap_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 	}
 }
 
-static void __remap_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
-					  unsigned long end,
-					  struct list_head *vmemmap_pages)
-{
-	unsigned long next, addr = start;
-	struct page *reuse = NULL;
-
-	do {
-		pte_t *ptep;
-
-		ptep = pte_offset_kernel(pmd, addr);
-		if (!reuse)
-			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
-
-		next = vmemmap_hpage_addr_end(addr, end);
-		__remap_huge_page_pte_vmemmap(reuse, ptep, addr, next,
-					      vmemmap_pages);
-	} while (pmd++, addr = next, addr != end);
-
-	flush_tlb_kernel_range(start, end);
-}
-
 static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
 {
 	unsigned int nr = free_vmemmap_pages_per_hpage(h);
@@ -258,7 +261,8 @@ void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
 
 	start = vmemmap_addr + RESERVE_VMEMMAP_SIZE;
 	end = vmemmap_addr + vmemmap_pages_size_per_hpage(h);
-	__remap_huge_page_pmd_vmemmap(pmd, start, end, &map_pages);
+	remap_huge_page_pmd_vmemmap(pmd, start, end,
+				    __remap_huge_page_pte_vmemmap, &map_pages);
 }
 
 static inline void free_vmemmap_page_list(struct list_head *list)
@@ -273,13 +277,13 @@ static inline void free_vmemmap_page_list(struct list_head *list)
 
 static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 					 unsigned long start,
-					 unsigned long end,
-					 struct list_head *free_pages)
+					 unsigned long end, void *priv)
 {
 	/* Make the tail pages are mapped read-only. */
 	pgprot_t pgprot = PAGE_KERNEL_RO;
 	pte_t entry = mk_pte(reuse, pgprot);
 	unsigned long addr;
+	struct list_head *pages = priv;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE, ptep++) {
 		struct page *page;
@@ -287,34 +291,12 @@ static void __free_huge_page_pte_vmemmap(struct page *reuse, pte_t *ptep,
 
 		VM_WARN_ON(!pte_present(old));
 		page = pte_page(old);
-		list_add(&page->lru, free_pages);
+		list_add(&page->lru, pages);
 
 		set_pte_at(&init_mm, addr, ptep, entry);
 	}
 }
 
-static void __free_huge_page_pmd_vmemmap(pmd_t *pmd, unsigned long start,
-					 unsigned long end,
-					 struct list_head *vmemmap_pages)
-{
-	unsigned long next, addr = start;
-	struct page *reuse = NULL;
-
-	do {
-		pte_t *ptep;
-
-		ptep = pte_offset_kernel(pmd, addr);
-		if (!reuse)
-			reuse = pte_page(ptep[TAIL_PAGE_REUSE]);
-
-		next = vmemmap_hpage_addr_end(addr, end);
-		__free_huge_page_pte_vmemmap(reuse, ptep, addr, next,
-					     vmemmap_pages);
-	} while (pmd++, addr = next, addr != end);
-
-	flush_tlb_kernel_range(start, end);
-}
-
 void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 	pmd_t *pmd;
@@ -330,7 +312,8 @@ void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 
 	start = vmemmap_addr + RESERVE_VMEMMAP_SIZE;
 	end = vmemmap_addr + vmemmap_pages_size_per_hpage(h);
-	__free_huge_page_pmd_vmemmap(pmd, start, end, &free_pages);
+	remap_huge_page_pmd_vmemmap(pmd, start, end,
+				    __free_huge_page_pte_vmemmap, &free_pages);
 	free_vmemmap_page_list(&free_pages);
 }
 
-- 
2.11.0




[Index of Archives]     [Kernel Newbies]     [Security]     [Netfilter]     [Bugtraq]     [Linux FS]     [Yosemite Forum]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Samba]     [Video 4 Linux]     [Device Mapper]     [Linux Resources]

  Powered by Linux