[PATCH v7 10/15] mm/hugetlb: Allocate the vmemmap pages associated with each hugetlb page

When we free a HugeTLB page to the buddy allocator, we should allocate
the vmemmap pages associated with it again and restore the vmemmap
mapping, undoing the work of free_huge_page_vmemmap(). We can do that
in __free_hugepage(). To share the page-table walk between the freeing
and the restoring path, vmemmap_remap_range() now takes a callback.
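
In outline (a simplified sketch of the code added below; error handling
and architecture details are omitted):

  __free_hugepage()
    alloc_huge_page_vmemmap()
      alloc_vmemmap_pages()           /* allocate one page for each
                                         vmemmap page to be restored,
                                         retrying until it succeeds */
      vmemmap_remap_range()
        vmemmap_restore_pte_range()   /* copy the shared reuse page into
                                         each new page and rewrite its
                                         PTE with PAGE_KERNEL */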

Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 mm/hugetlb.c         |  2 ++
 mm/hugetlb_vmemmap.c | 90 +++++++++++++++++++++++++++++++++++++++++++++++++---
 mm/hugetlb_vmemmap.h |  5 +++
 3 files changed, 92 insertions(+), 5 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5131ae3d2245..ebe35532d432 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1381,6 +1381,8 @@ static void __free_hugepage(struct hstate *h, struct page *page)
 {
 	int i;
 
+	alloc_huge_page_vmemmap(h, page);
+
 	for (i = 0; i < pages_per_huge_page(h); i++) {
 		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
 				1 << PG_referenced | 1 << PG_dirty |
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index af42fad1f131..a3714db7f400 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -95,6 +95,7 @@
 #define pr_fmt(fmt)	"HugeTLB vmemmap: " fmt
 
 #include <linux/bootmem_info.h>
+#include <linux/delay.h>
 #include "hugetlb_vmemmap.h"
 
 /*
@@ -108,6 +109,8 @@
 #define RESERVE_VMEMMAP_NR		2U
 #define RESERVE_VMEMMAP_SIZE		(RESERVE_VMEMMAP_NR << PAGE_SHIFT)
 #define VMEMMAP_TAIL_PAGE_REUSE		-1
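+/*
+ * GFP flags for allocating vmemmap pages: __GFP_HIGH grants access to
+ * memory reserves, and __GFP_RETRY_MAYFAIL retries hard before giving
+ * up, since the caller cannot tolerate allocation failure anyway.
+ */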
+#define GFP_VMEMMAP_PAGE		\
+	(GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_HIGH)
 
 #ifndef VMEMMAP_HPAGE_SHIFT
 #define VMEMMAP_HPAGE_SHIFT		HPAGE_SHIFT
@@ -124,6 +127,11 @@
 	(__boundary - 1 < (end) - 1) ? __boundary : (end);		 \
 })
 
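+/*
+ * Callback invoked by vmemmap_remap_range() for each PMD-sized chunk
+ * [@start, @end) of the vmemmap range being remapped; @priv carries
+ * caller-specific state (here, a list of vmemmap pages).
+ */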
+typedef void (*vmemmap_remap_pte_func_t)(struct page *reuse, pte_t *pte,
+					 unsigned long start, unsigned long end,
+					 void *priv);
+
+
 static inline unsigned int vmemmap_pages_per_hpage(struct hstate *h)
 {
 	return free_vmemmap_pages_per_hpage(h) + RESERVE_VMEMMAP_NR;
@@ -163,9 +171,40 @@ static pmd_t *vmemmap_to_pmd(unsigned long addr)
 	return pmd;
 }
 
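+/*
+ * Restore a range of vmemmap PTEs that currently all point at the
+ * shared @reuse page: for each PTE, take a fresh page from the list
+ * passed in @priv, copy the contents of @reuse into it, and remap the
+ * PTE to the new page with PAGE_KERNEL permissions.
+ */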
+static void vmemmap_restore_pte_range(struct page *reuse, pte_t *pte,
+				      unsigned long start, unsigned long end,
+				      void *priv)
+{
+	pgprot_t pgprot = PAGE_KERNEL;
+	void *from = page_to_virt(reuse);
+	unsigned long addr;
+	struct list_head *pages = priv;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		void *to;
+		struct page *page;
+
+		VM_BUG_ON(pte_none(*pte) || pte_page(*pte) != reuse);
+
+		page = list_first_entry(pages, struct page, lru);
+		list_del(&page->lru);
+		to = page_to_virt(page);
+		copy_page(to, from);
+
+		/*
+		 * Make sure that any data written to @to is made
+		 * visible to the physical page.
+		 */
+		flush_kernel_vmap_range(to, PAGE_SIZE);
+
+		prepare_vmemmap_page(page);
+		set_pte_at(&init_mm, addr, pte++, mk_pte(page, pgprot));
+	}
+}
+
 static void vmemmap_reuse_pte_range(struct page *reuse, pte_t *pte,
 				    unsigned long start, unsigned long end,
-				    struct list_head *vmemmap_pages)
+				    void *priv)
 {
 	/*
 	 * Make sure the tail pages are mapped read-only to catch
@@ -174,6 +213,7 @@ static void vmemmap_reuse_pte_range(struct page *reuse, pte_t *pte,
 	pgprot_t pgprot = PAGE_KERNEL_RO;
 	pte_t entry = mk_pte(reuse, pgprot);
 	unsigned long addr;
+	struct list_head *pages = priv;
 
 	for (addr = start; addr < end; addr += PAGE_SIZE, pte++) {
 		struct page *page;
@@ -181,14 +221,14 @@ static void vmemmap_reuse_pte_range(struct page *reuse, pte_t *pte,
 		VM_BUG_ON(pte_none(*pte));
 
 		page = pte_page(*pte);
-		list_add(&page->lru, vmemmap_pages);
+		list_add(&page->lru, pages);
 
 		set_pte_at(&init_mm, addr, pte, entry);
 	}
 }
 
 static void vmemmap_remap_range(unsigned long start, unsigned long end,
-				struct list_head *vmemmap_pages)
+				vmemmap_remap_pte_func_t func, void *priv)
 {
 	pmd_t *pmd;
 	unsigned long next, addr = start;
@@ -208,12 +248,52 @@ static void vmemmap_remap_range(unsigned long start, unsigned long end,
 			reuse = pte_page(pte[VMEMMAP_TAIL_PAGE_REUSE]);
 
 		next = vmemmap_hpage_addr_end(addr, end);
-		vmemmap_reuse_pte_range(reuse, pte, addr, next, vmemmap_pages);
+		func(reuse, pte, addr, next, priv);
 	} while (pmd++, addr = next, addr != end);
 
 	flush_tlb_kernel_range(start, end);
 }
 
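+/*
+ * Allocate one page for every vmemmap page that is to be restored for
+ * a HugeTLB page of hstate @h, sleeping and retrying on failure.
+ */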
+static inline void alloc_vmemmap_pages(struct hstate *h, struct list_head *list)
+{
+	unsigned int nr = free_vmemmap_pages_per_hpage(h);
+
+	while (nr--) {
+		struct page *page;
+
+retry:
+		page = alloc_page(GFP_VMEMMAP_PAGE);
+		if (unlikely(!page)) {
+			msleep(100);
+			/*
+			 * Retry indefinitely: we cannot handle allocation
+			 * failures here. Only after all of the vmemmap
+			 * pages have been allocated can the HugeTLB page
+			 * be freed.
+			 */
+			goto retry;
+		}
+		list_add_tail(&page->lru, list);
+	}
+}
+
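+/*
+ * Reallocate the vmemmap pages for @head and restore the vmemmap
+ * mapping torn down by free_huge_page_vmemmap().
+ */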
+void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+	unsigned long start, end;
+	unsigned long vmemmap_addr = (unsigned long)head;
+	LIST_HEAD(vmemmap_pages);
+
+	if (!free_vmemmap_pages_per_hpage(h))
+		return;
+
+	alloc_vmemmap_pages(h, &vmemmap_pages);
+
+	start = vmemmap_addr + RESERVE_VMEMMAP_SIZE;
+	end = vmemmap_addr + vmemmap_pages_size_per_hpage(h);
+	vmemmap_remap_range(start, end, vmemmap_restore_pte_range,
+			    &vmemmap_pages);
+}
+
 static inline void free_vmemmap_page_list(struct list_head *list)
 {
 	struct page *page, *next;
@@ -235,7 +315,7 @@ void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 
 	start = vmemmap_addr + RESERVE_VMEMMAP_SIZE;
 	end = vmemmap_addr + vmemmap_pages_size_per_hpage(h);
-	vmemmap_remap_range(start, end, &vmemmap_pages);
+	vmemmap_remap_range(start, end, vmemmap_reuse_pte_range, &vmemmap_pages);
 
 	free_vmemmap_page_list(&vmemmap_pages);
 }
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 293897b9f1d8..7887095488f4 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -12,6 +12,7 @@
 
 #ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 void __init hugetlb_vmemmap_init(struct hstate *h);
+void alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
 
 static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
@@ -23,6 +24,10 @@ static inline void hugetlb_vmemmap_init(struct hstate *h)
 {
 }
 
+static inline void alloc_huge_page_vmemmap(struct hstate *h, struct page *head)
+{
+}
+
 static inline void free_huge_page_vmemmap(struct hstate *h, struct page *head)
 {
 }
-- 
2.11.0