Later in the series, in madvise collapse context, we will want to
optionally ignore MADV_NOHUGEPAGE.  However, we'd also like to
standardize on __transparent_hugepage_enabled() for determining anon
thp eligibility.

Add a new argument to __transparent_hugepage_enabled() which represents
the vma flags to be used instead of those in vma->vm_flags for
VM_[NO]HUGEPAGE checks.  I.e. checks inside
__transparent_hugepage_enabled() which previously didn't care about
madvise settings, such as the dax and stack checks, are unaffected.

Signed-off-by: Zach O'Keefe <zokeefe@xxxxxxxxxx>
---
 include/linux/huge_mm.h | 14 ++++++++++----
 mm/huge_memory.c        |  2 +-
 mm/memory.c             |  6 ++++--
 3 files changed, 15 insertions(+), 7 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2999190adc22..fd905b0b2c71 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -143,8 +143,13 @@ static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
 /*
  * to be used on vmas which are known to support THP.
  * Use transparent_hugepage_active otherwise
+ *
+ * madv_thp_vm_flags are used instead of vma->vm_flags for VM_NOHUGEPAGE
+ * and VM_HUGEPAGE. Principal use is ignoring VM_NOHUGEPAGE when in madvise
+ * collapse context.
  */
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma,
+						  unsigned long madv_thp_vm_flags)
 {
 
 	/*
@@ -153,7 +158,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
 		return false;
 
-	if (!transhuge_vma_enabled(vma, vma->vm_flags))
+	if (!transhuge_vma_enabled(vma, madv_thp_vm_flags))
 		return false;
 
 	if (vma_is_temporary_stack(vma))
@@ -167,7 +172,7 @@ static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
 
 	if (transparent_hugepage_flags &
 				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
-		return !!(vma->vm_flags & VM_HUGEPAGE);
+		return !!(madv_thp_vm_flags & VM_HUGEPAGE);
 
 	return false;
 }
@@ -316,7 +321,8 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
 	return false;
 }
 
-static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
+static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma,
+						  unsigned long madv_thp_vm_flags)
 {
 	return false;
 }
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3557aabe86fe..25b7590b9846 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -83,7 +83,7 @@ bool transparent_hugepage_active(struct vm_area_struct *vma)
 	if (!transhuge_vma_suitable(vma, addr))
 		return false;
 	if (vma_is_anonymous(vma))
-		return __transparent_hugepage_enabled(vma);
+		return __transparent_hugepage_enabled(vma, vma->vm_flags);
 	if (vma_is_shmem(vma))
 		return shmem_huge_enabled(vma);
 	if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS))
diff --git a/mm/memory.c b/mm/memory.c
index 4499cf09c21f..a6f2a8a20329 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4695,7 +4695,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	if (!vmf.pud)
 		return VM_FAULT_OOM;
 retry_pud:
-	if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
+	if (pud_none(*vmf.pud) &&
+	    __transparent_hugepage_enabled(vma, vma->vm_flags)) {
 		ret = create_huge_pud(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
@@ -4726,7 +4727,8 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	if (pud_trans_unstable(vmf.pud))
 		goto retry_pud;
 
-	if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
+	if (pmd_none(*vmf.pmd) &&
+	    __transparent_hugepage_enabled(vma, vma->vm_flags)) {
 		ret = create_huge_pmd(&vmf);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
-- 
2.35.1.616.g0bdcbb4464-goog
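P.S. For reviewers, a rough sketch of the intended call pattern.  This is
not part of the patch; the helper name and the bool parameter below are
hypothetical, and the real madvise collapse caller only arrives later in
the series:

/*
 * Hypothetical sketch only -- not part of this patch.  The fault path
 * keeps passing the unmodified vma->vm_flags, while a madvise collapse
 * caller could pass a copy with VM_NOHUGEPAGE cleared, so MADV_NOHUGEPAGE
 * is ignored for the VM_[NO]HUGEPAGE checks while the dax/stack checks
 * still apply.
 */
static bool anon_thp_eligible(struct vm_area_struct *vma,
			      bool in_madvise_collapse)
{
	unsigned long madv_thp_vm_flags = vma->vm_flags;

	if (in_madvise_collapse)
		madv_thp_vm_flags &= ~VM_NOHUGEPAGE;

	return __transparent_hugepage_enabled(vma, madv_thp_vm_flags);
}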