Re: [v4 PATCH 2/7] mm: thp: consolidate vma size check to transhuge_vma_suitable

On 15 Jun 10:29, Yang Shi wrote:
> There are a couple of places that check whether the vma size is ok for
> THP or whether the address fits; they are open coded and duplicated.
> Use transhuge_vma_suitable() to do the job by passing in
> (vma->vm_end - HPAGE_PMD_SIZE).
> 
> Move the vma size check into hugepage_vma_check().  This makes
> khugepaged_enter() the same as khugepaged_enter_vma().  There is just
> one caller of khugepaged_enter(), so replace it with
> khugepaged_enter_vma() and remove khugepaged_enter().
> 
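For anyone reading along without the tree open: here is a rough sketch of the
helper everything is being consolidated into (paraphrased from
include/linux/huge_mm.h, not a verbatim copy -- check the tree for the exact
definition):

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long addr)
{
	unsigned long haddr = addr & HPAGE_PMD_MASK;

	/* File vma: PMD-aligned addresses must map to PMD-aligned file offsets */
	if (!vma_is_anonymous(vma) &&
	    !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
			HPAGE_PMD_NR))
		return false;

	/* The aligned hugepage containing addr must lie entirely inside the vma */
	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;

	return true;
}

So with addr = vma->vm_end - HPAGE_PMD_SIZE, haddr becomes the last PMD-aligned
address whose hugepage still ends inside the vma, and the haddr < vma->vm_start
test rejects the vma exactly when it cannot hold an aligned hugepage -- which is
what the removed open-coded hstart/hend comparisons were checking.
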
> Signed-off-by: Yang Shi <shy828301@xxxxxxxxx>
> ---
>  include/linux/huge_mm.h    | 11 +++++++++++
>  include/linux/khugepaged.h | 14 --------------
>  mm/huge_memory.c           |  2 +-
>  mm/khugepaged.c            | 19 ++++++-------------
>  4 files changed, 18 insertions(+), 28 deletions(-)
> 
> diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
> index 648cb3ce7099..8a5a8bfce0f5 100644
> --- a/include/linux/huge_mm.h
> +++ b/include/linux/huge_mm.h
> @@ -116,6 +116,17 @@ extern struct kobj_attribute shmem_enabled_attr;
>  
>  extern unsigned long transparent_hugepage_flags;
>  
> +/*
> + * Do the below checks:
> + *   - For file vma, check if the linear page offset of vma is
> + *     HPAGE_PMD_NR aligned within the file.  The hugepage is
> + *     guaranteed to be hugepage-aligned within the file, but we must
> + *     check that the PMD-aligned addresses in the VMA map to
> + *     PMD-aligned offsets within the file, else the hugepage will
> + *     not be PMD-mappable.
> + *   - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
> + *     area.
> + */
>  static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
>  		unsigned long addr)
>  {
> diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
> index 392d34c3c59a..31ca8a7f78f4 100644
> --- a/include/linux/khugepaged.h
> +++ b/include/linux/khugepaged.h
> @@ -51,16 +51,6 @@ static inline void khugepaged_exit(struct mm_struct *mm)
>  	if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
>  		__khugepaged_exit(mm);
>  }
> -
> -static inline void khugepaged_enter(struct vm_area_struct *vma,
> -				   unsigned long vm_flags)
> -{
> -	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
> -	    khugepaged_enabled()) {
> -		if (hugepage_vma_check(vma, vm_flags))
> -			__khugepaged_enter(vma->vm_mm);
> -	}
> -}
>  #else /* CONFIG_TRANSPARENT_HUGEPAGE */
>  static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
>  {
> @@ -68,10 +58,6 @@ static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm
>  static inline void khugepaged_exit(struct mm_struct *mm)
>  {
>  }
> -static inline void khugepaged_enter(struct vm_area_struct *vma,
> -				    unsigned long vm_flags)
> -{
> -}
>  static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
>  					unsigned long vm_flags)
>  {
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 4f9bbb4eab23..b530462c4493 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -726,7 +726,7 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
>  		return VM_FAULT_FALLBACK;
>  	if (unlikely(anon_vma_prepare(vma)))
>  		return VM_FAULT_OOM;
> -	khugepaged_enter(vma, vma->vm_flags);
> +	khugepaged_enter_vma(vma, vma->vm_flags);
>  
>  	if (!(vmf->flags & FAULT_FLAG_WRITE) &&
>  			!mm_forbids_zeropage(vma->vm_mm) &&
> diff --git a/mm/khugepaged.c b/mm/khugepaged.c
> index b1dab94c0f1e..db0b334a7d1f 100644
> --- a/mm/khugepaged.c
> +++ b/mm/khugepaged.c
> @@ -450,8 +450,8 @@ bool hugepage_vma_check(struct vm_area_struct *vma,
>  	if (vma_is_dax(vma))
>  		return false;
>  
> -	if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
> -				vma->vm_pgoff, HPAGE_PMD_NR))
> +	/* Check alignment for file vma and size for both file and anon vma */
> +	if (!transhuge_vma_suitable(vma, (vma->vm_end - HPAGE_PMD_SIZE)))
>  		return false;
>  
>  	/* Enabled via shmem mount options or sysfs settings. */
> @@ -512,9 +512,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
>  			  unsigned long vm_flags)
>  {
>  	if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
> -	    khugepaged_enabled() &&
> -	    (((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
> -	     (vma->vm_end & HPAGE_PMD_MASK))) {
> +	    khugepaged_enabled()) {
>  		if (hugepage_vma_check(vma, vm_flags))
>  			__khugepaged_enter(vma->vm_mm);
>  	}
> @@ -950,7 +948,6 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>  		struct vm_area_struct **vmap)
>  {
>  	struct vm_area_struct *vma;
> -	unsigned long hstart, hend;
>  
>  	if (unlikely(khugepaged_test_exit(mm)))
>  		return SCAN_ANY_PROCESS;
> @@ -959,9 +956,7 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
>  	if (!vma)
>  		return SCAN_VMA_NULL;
>  
> -	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
> -	hend = vma->vm_end & HPAGE_PMD_MASK;
> -	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
> +	if (!transhuge_vma_suitable(vma, address))
>  		return SCAN_ADDRESS_RANGE;
>  	if (!hugepage_vma_check(vma, vma->vm_flags))
>  		return SCAN_VMA_CHECK;
> @@ -2147,10 +2142,8 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
>  			progress++;
>  			continue;
>  		}
> -		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
> -		hend = vma->vm_end & HPAGE_PMD_MASK;
> -		if (hstart >= hend)
> -			goto skip;
> +		hstart = round_up(vma->vm_start, HPAGE_PMD_SIZE);
> +		hend = round_down(vma->vm_end, HPAGE_PMD_SIZE);
>  		if (khugepaged_scan.address > hend)
>  			goto skip;
>  		if (khugepaged_scan.address < hstart)
> -- 
> 2.26.3
> 

Thanks for the patches, Yang.
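
One small readability note on the khugepaged_scan_mm_slot() hunk: the
round_up()/round_down() form computes the same thing as the old open-coded
masking. A quick userspace sanity sketch (mocked macros and made-up example
addresses, assuming 2MB PMD hugepages; not kernel code):

#include <assert.h>
#include <stdio.h>

#define HPAGE_PMD_SIZE	(1UL << 21)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
#define round_up(x, y)		((((x) - 1) | ((y) - 1)) + 1)
#define round_down(x, y)	((x) & ~((y) - 1))

int main(void)
{
	unsigned long vm_start = 0x201000, vm_end = 0x801000;

	/* old open-coded form */
	unsigned long hstart_old = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	unsigned long hend_old = vm_end & HPAGE_PMD_MASK;

	/* new form used by the patch */
	unsigned long hstart_new = round_up(vm_start, HPAGE_PMD_SIZE);
	unsigned long hend_new = round_down(vm_end, HPAGE_PMD_SIZE);

	assert(hstart_old == hstart_new && hend_old == hend_new);
	printf("hstart=%#lx hend=%#lx\n", hstart_new, hend_new);
	return 0;
}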

Reviewed-by: Zach O'Keefe <zokeefe@xxxxxxxxxx>