Re: [PATCH v2] mm: add more readable thp_vma_allowable_order_foo()

On 2024/4/25 12:00, Matthew Wilcox wrote:
On Thu, Apr 25, 2024 at 11:51:08AM +0800, Kefeng Wang wrote:
There are too many bool arguments in thp_vma_allowable_orders(), so add
some more readable thp_vma_allowable_order_foo() helpers,

Here's an alternative approach I came up with and forgot to send out.
I take no position on which is better.

The bool arguments are always confusing; either way is fine, or we could
even combine the two approaches. Let's see Ryan's/David's opinion.
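
(Combining them could, for instance, mean keeping the tva_flags interface
from the patch below and layering named wrappers on top of it; the wrapper
name here is hypothetical, only a sketch:

#define thp_vma_allowable_pmd_order_pf(vma, vm_flags)			\
	thp_vma_allowable_order(vma, vm_flags,				\
				TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)

so call sites stay short while the flags remain the single underlying
interface.)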


commit a761d4b9cf14
Author: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Date:   Tue Apr 16 00:25:09 2024 -0400

     mm: Simplify thp_vma_allowable_order
     Combine the three boolean arguments into one flags argument for
     readability.

     Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 23fbab954c20..0ffa8902f973 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -866,8 +866,8 @@ static int show_smap(struct seq_file *m, void *v)
  	__show_smap(m, &mss, false);
 
 	seq_printf(m, "THPeligible:    %8u\n",
-		   !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
-					      true, THP_ORDERS_ALL));
+		   !!thp_vma_allowable_orders(vma, vma->vm_flags,
+			   TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
 
 	if (arch_pkeys_enabled())
  		seq_printf(m, "ProtectionKey:  %8u\n", vma_pkey(vma));
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index de0c89105076..0d0ba39b86ae 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -84,8 +84,12 @@ extern struct kobj_attribute shmem_enabled_attr;
   */
  #define THP_ORDERS_ALL		(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
-#define thp_vma_allowable_order(vma, vm_flags, smaps, in_pf, enforce_sysfs, order) \
-	(!!thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf, enforce_sysfs, BIT(order)))
+#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
+#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
+#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */
+
+#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
+	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  #define HPAGE_PMD_SHIFT PMD_SHIFT
@@ -210,17 +214,15 @@ static inline bool file_thp_enabled(struct vm_area_struct *vma)
  }
 
 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-					 unsigned long vm_flags, bool smaps,
-					 bool in_pf, bool enforce_sysfs,
+					 unsigned long vm_flags,
+					 unsigned long tva_flags,
  					 unsigned long orders);
 
 /**
   * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
   * @vma:  the vm area to check
   * @vm_flags: use these vm_flags instead of vma->vm_flags
- * @smaps: whether answer will be used for smaps file
- * @in_pf: whether answer will be used by page fault handler
- * @enforce_sysfs: whether sysfs config should be taken into account
+ * @tva_flags: Which TVA flags to honour
   * @orders: bitfield of all orders to consider
   *
   * Calculates the intersection of the requested hugepage orders and the allowed
@@ -233,12 +235,12 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
   */
  static inline
  unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
-				       unsigned long vm_flags, bool smaps,
-				       bool in_pf, bool enforce_sysfs,
+				       unsigned long vm_flags,
+				       unsigned long tva_flags,
  				       unsigned long orders)
  {
  	/* Optimization to check if required orders are enabled early. */
-	if (enforce_sysfs && vma_is_anonymous(vma)) {
+	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
  		unsigned long mask = READ_ONCE(huge_anon_orders_always);
 
 		if (vm_flags & VM_HUGEPAGE)
@@ -252,8 +254,7 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
  			return 0;
  	}
 
-	return __thp_vma_allowable_orders(vma, vm_flags, smaps, in_pf,
-					  enforce_sysfs, orders);
+	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
  }
 
 #define transparent_hugepage_use_zero_page() \
@@ -404,8 +405,8 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
  }
 
 static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
-					unsigned long vm_flags, bool smaps,
-					bool in_pf, bool enforce_sysfs,
+					unsigned long vm_flags,
+					unsigned long tva_flags,
  					unsigned long orders)
  {
  	return 0;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8bc4ffd4725e..5d3d9c0c4153 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -80,10 +80,13 @@ unsigned long huge_anon_orders_madvise __read_mostly;
  unsigned long huge_anon_orders_inherit __read_mostly;
 
 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
-					 unsigned long vm_flags, bool smaps,
-					 bool in_pf, bool enforce_sysfs,
+					 unsigned long vm_flags,
+					 unsigned long tva_flags,
  					 unsigned long orders)
  {
+	bool smaps = tva_flags & TVA_SMAPS;
+	bool in_pf = tva_flags & TVA_IN_PF;
+	bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
  	/* Check the intersection of requested and supported orders. */
  	orders &= vma_is_anonymous(vma) ?
  			THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 38830174608f..9642d3c6ee7e 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -453,7 +453,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
  {
  	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
  	    hugepage_flags_enabled()) {
-		if (thp_vma_allowable_order(vma, vm_flags, false, false, true,
+		if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
  					    PMD_ORDER))
  			__khugepaged_enter(vma->vm_mm);
  	}
@@ -917,6 +917,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
  				   struct collapse_control *cc)
  {
  	struct vm_area_struct *vma;
+	unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
 
 	if (unlikely(hpage_collapse_test_exit_or_disable(mm)))
  		return SCAN_ANY_PROCESS;
@@ -927,8 +928,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
 
 	if (!thp_vma_suitable_order(vma, address, PMD_ORDER))
  		return SCAN_ADDRESS_RANGE;
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-				     cc->is_khugepaged, PMD_ORDER))
+	if (!thp_vma_allowable_order(vma, vma->vm_flags, tva_flags, PMD_ORDER))
  		return SCAN_VMA_CHECK;
  	/*
  	 * Anon VMA expected, the address may be unmapped then
@@ -1510,8 +1510,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
  	 * and map it by a PMD, regardless of sysfs THP settings. As such, let's
  	 * analogously elide sysfs THP settings here.
  	 */
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-				     PMD_ORDER))
+	if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
  		return SCAN_VMA_CHECK;
 
 	/* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
@@ -2376,8 +2375,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
  			progress++;
  			break;
  		}
-		if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false,
-					     true, PMD_ORDER)) {
+		if (!thp_vma_allowable_order(vma, vma->vm_flags,
+					TVA_ENFORCE_SYSFS, PMD_ORDER)) {
  skip:
  			progress++;
  			continue;
@@ -2714,8 +2713,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	*prev = vma;
 
-	if (!thp_vma_allowable_order(vma, vma->vm_flags, false, false, false,
-				     PMD_ORDER))
+	if (!thp_vma_allowable_order(vma, vma->vm_flags, 0, PMD_ORDER))
  		return -EINVAL;
 
 	cc = kmalloc(sizeof(*cc), GFP_KERNEL);
diff --git a/mm/memory.c b/mm/memory.c
index 5624b881b662..287f7d6eb9ed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4346,8 +4346,8 @@ static struct folio *alloc_anon_folio(struct vm_fault *vmf)
  	 * for this vma. Then filter out the orders that can't be allocated over
  	 * the faulting address and still be fully contained in the vma.
  	 */
-	orders = thp_vma_allowable_orders(vma, vma->vm_flags, false, true, true,
-					  BIT(PMD_ORDER) - 1);
+	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+			TVA_IN_PF | TVA_ENFORCE_SYSFS, BIT(PMD_ORDER) - 1);
  	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 
 	if (!orders)
@@ -5395,7 +5395,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
  		return VM_FAULT_OOM;
  retry_pud:
  	if (pud_none(*vmf.pud) &&
-	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PUD_ORDER)) {
+	    thp_vma_allowable_order(vma, vm_flags,
+				TVA_IN_PF | TVA_ENFORCE_SYSFS, PUD_ORDER)) {
  		ret = create_huge_pud(&vmf);
  		if (!(ret & VM_FAULT_FALLBACK))
  			return ret;
@@ -5429,7 +5430,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
  		goto retry_pud;
 
 	if (pmd_none(*vmf.pmd) &&
-	    thp_vma_allowable_order(vma, vm_flags, false, true, true, PMD_ORDER)) {
+	    thp_vma_allowable_order(vma, vm_flags,
+				TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER)) {
  		ret = create_huge_pmd(&vmf);
  		if (!(ret & VM_FAULT_FALLBACK))
  			return ret;
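
As an aside, here is a minimal, self-contained userspace sketch of how the
single tva_flags bitmask reads at a call site compared to the positional
bools. Only the TVA_* definitions are taken from the patch above; the
demo_allowable_orders() helper and main() are purely illustrative, not
kernel code:

#include <stdio.h>

#define TVA_SMAPS		(1 << 0)	/* answer is for /proc/<pid>/smaps */
#define TVA_IN_PF		(1 << 1)	/* called from the page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* honour the sysfs THP settings */

/* Stand-in for __thp_vma_allowable_orders(): it only decodes the flags. */
static unsigned long demo_allowable_orders(unsigned long tva_flags,
					   unsigned long orders)
{
	int smaps = !!(tva_flags & TVA_SMAPS);
	int in_pf = !!(tva_flags & TVA_IN_PF);
	int enforce_sysfs = !!(tva_flags & TVA_ENFORCE_SYSFS);

	printf("smaps=%d in_pf=%d enforce_sysfs=%d orders=%#lx\n",
	       smaps, in_pf, enforce_sysfs, orders);
	return orders;
}

int main(void)
{
	/*
	 * The old style would be demo_allowable_orders(..., false, true, true, ...),
	 * where the reader must remember which position means what; with the
	 * bitmask the call site names exactly the behaviour it wants.
	 */
	demo_allowable_orders(TVA_IN_PF | TVA_ENFORCE_SYSFS, (1UL << 9) - 1);
	return 0;
}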



