[PATCH v2] mm: add more readable thp_vma_allowable_order_foo()

There are too many bool arguments in thp_vma_allowable_orders(), which
makes its call sites hard to read. Add more readable
thp_vma_allowable_order_foo() wrappers:

  thp_vma_allowable_orders_smaps()     is used in smaps
  thp_vma_allowable_order[s]_pf()      is used in the page fault path
  thp_vma_allowable_order_khugepaged() is used in the khugepaged scan
                                       and in madvise_collapse()
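
For example, with these wrappers the PMD-order page fault check in
__handle_mm_fault() goes from

	thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)

to

	thp_vma_allowable_order_pf(vma, vm_flags, PMD_ORDER)

so the reader no longer has to decode the smaps/in_pf/enforce_sysfs
bools at each call site.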

Reviewed-by: Ryan Roberts <ryan.roberts@xxxxxxx>
Signed-off-by: Kefeng Wang <wangkefeng.wang@xxxxxxxxxx>
---
v2:
- use the new thp_vma_allowable_order_khugepaged() naming, suggested by
  Ryan/David

 fs/proc/task_mmu.c      |  3 +--
 include/linux/huge_mm.h | 14 ++++++++++++--
 mm/khugepaged.c         | 24 ++++++++++++------------
 mm/memory.c             |  8 ++++----
 4 files changed, 29 insertions(+), 20 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f4259b7edfde..e95ec49bf190 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -871,8 +871,7 @@ static int show_smap(struct seq_file *m, void *v)
 	__show_smap(m, &mss, false);
 
 	seq_printf(m, "THPeligible:    %8u\n",
-		   !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
-					      true, THP_ORDERS_ALL));
+		   thp_vma_allowable_orders_smaps(vma, vma->vm_flags));
 
 	if (arch_pkeys_enabled())
 		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 56c7ea73090b..87409e87c241 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -83,8 +83,18 @@ extern struct kobj_attribute shmem_enabled_attr;
  */
 #define THP_ORDERS_ALL		(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
 
-#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
-	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define thp_vma_allowable_orders_smaps(vma, vm_flags) \
+	(!!thp_vma_allowable_orders(vma, vm_flags, true, false, true, THP_ORDERS_ALL))
+
+#define thp_vma_allowable_orders_pf(vma, vm_flags, orders) \
+	(!!thp_vma_allowable_orders(vma, vm_flags, false, true, true, orders))
+
+#define thp_vma_allowable_order_pf(vma, vm_flags, order) \
+	(!!thp_vma_allowable_orders_pf(vma, vm_flags, BIT(order)))
+
+#define thp_vma_allowable_order_khugepaged(vma, vm_flags, enforce_sysfs, order) \
+	(!!thp_vma_allowable_orders(vma, vm_flags, false, false, enforce_sysfs, BIT(order)))
+
 
 #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
 #define HPAGE_PMD_SHIFT PMD_SHIFT
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 2f73d2aa9ae8..006c8c9a5b68 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -453,8 +453,8 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 {
 	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
 	    hugepage_flags_enabled()) {
-		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
-					    PMD_ORDER))
+		if (thp_vma_allowable_order_khugepaged(vma, vm_flags, true,
+						       PMD_ORDER))
 			__khugepaged_enter(vma->vm_mm);
 	}
 }
@@ -909,15 +909,15 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 
 	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
 		return SCAN_ADDRESS_RANGE;
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-				     cc->is_khugepaged, PMD_ORDER))
+	if (!thp_vma_allowable_order_khugepaged(vma, vma->vm_flags,
+						cc->is_khugepaged, PMD_ORDER))
 		return SCAN_VMA_CHECK;
 	/*
 	 * Anon VMA expected, the address may be unmapped then
 	 * remapped to file after khugepaged reaquired the mmap_lock.
 	 *
-	 * thp_vma_allowable_order may return true for qualified file
-	 * vmas.
+	 * thp_vma_allowable_order_khugepaged may return true for
+	 * qualified file vmas.
 	 */
 	if (expect_anon && (!(*vmap)->anon_vma || !vma_is_anonymous(*vmap)))
 		return SCAN_PAGE_ANON;
@@ -1493,8 +1493,8 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
 	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
 	 * analogously elide sysfs THP settings here.
 	 */
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-				     PMD_ORDER))
+	if (!thp_vma_allowable_order_khugepaged(vma, vma->vm_flags, false,
+						PMD_ORDER))
 		return SCAN_VMA_CHECK;
 
 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2355,8 +2355,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 			progress++;
 			break;
 		}
-		if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-					     true, PMD_ORDER)) {
+		if (!thp_vma_allowable_order_khugepaged(vma, vma->vm_flags, true,
+							PMD_ORDER)) {
 skip:
 			progress++;
 			continue;
@@ -2693,8 +2693,8 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 
 	*prev = vma;
 
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-				     PMD_ORDER))
+	if (!thp_vma_allowable_order_khugepaged(vma, vma->vm_flags, false,
+						PMD_ORDER))
 		return -EINVAL;
 
 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
diff --git a/mm/memory.c b/mm/memory.c
index 09ed76e5b8c0..a1255fb2c709 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4329,8 +4329,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
 	 * for this vma. Then filter out the orders that can't be allocated over
 	 * the faulting address and still be fully contained in the vma.
 	 */
-	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
-					  BIT(PMD_ORDER) - 1);
+	orders = thp_vma_allowable_orders_pf(vma, vma->vm_flags,
+					     BIT(PMD_ORDER) - 1);
 	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 
 	if (!orders)
@@ -5433,7 +5433,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 retry_pud:
 	if (pud_none(*vmf.pud) &&
-	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
+	    thp_vma_allowable_order_pf(vma, vm_flags, PUD_ORDER)) {
 		ret = create_huge_pud(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
@@ -5467,7 +5467,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		goto retry_pud;
 
 	if (pmd_none(*vmf.pmd) &&
-	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
+	    thp_vma_allowable_order_pf(vma, vm_flags, PMD_ORDER)) {
 		ret = create_huge_pmd(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
-- 
2.41.0