Re: [PATCH 3/7] mm/hugetlb: Add page_mask for hugetlb_follow_page_mask()

On 06/13/23 17:53, Peter Xu wrote:
> follow_page() doesn't need it, but we'll start to need it when unifying gup
> for hugetlb.
> 
> Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
> ---
>  include/linux/hugetlb.h | 8 +++++---
>  mm/gup.c                | 3 ++-
>  mm/hugetlb.c            | 4 +++-
>  3 files changed, 10 insertions(+), 5 deletions(-)

Reviewed-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
-- 
Mike Kravetz

> 
> diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
> index 21f942025fec..0d6f389d98de 100644
> --- a/include/linux/hugetlb.h
> +++ b/include/linux/hugetlb.h
> @@ -131,7 +131,8 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
>  int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
>  			    struct vm_area_struct *, struct vm_area_struct *);
>  struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
> -				unsigned long address, unsigned int flags);
> +				      unsigned long address, unsigned int flags,
> +				      unsigned int *page_mask);
>  long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
>  			 struct page **, unsigned long *, unsigned long *,
>  			 long, unsigned int, int *);
> @@ -297,8 +298,9 @@ static inline void adjust_range_if_pmd_sharing_possible(
>  {
>  }
>  
> -static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
> -				unsigned long address, unsigned int flags)
> +static inline struct page *hugetlb_follow_page_mask(
> +    struct vm_area_struct *vma, unsigned long address, unsigned int flags,
> +    unsigned int *page_mask)
>  {
>  	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
>  }
> diff --git a/mm/gup.c b/mm/gup.c
> index aa0668505d61..8d59ae4554e7 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -794,7 +794,8 @@ static struct page *follow_page_mask(struct vm_area_struct *vma,
>  	 * Ordinary GUP uses follow_hugetlb_page for hugetlb processing.
>  	 */
>  	if (is_vm_hugetlb_page(vma))
> -		return hugetlb_follow_page_mask(vma, address, flags);
> +		return hugetlb_follow_page_mask(vma, address, flags,
> +						&ctx->page_mask);
>  
>  	pgd = pgd_offset(mm, address);
>  
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 9c261921b2cf..f037eaf9d819 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -6457,7 +6457,8 @@ static inline bool __follow_hugetlb_must_fault(struct vm_area_struct *vma,
>  }
>  
>  struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
> -				unsigned long address, unsigned int flags)
> +				      unsigned long address, unsigned int flags,
> +				      unsigned int *page_mask)
>  {
>  	struct hstate *h = hstate_vma(vma);
>  	struct mm_struct *mm = vma->vm_mm;
> @@ -6506,6 +6507,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
>  		 * because we hold the ptl lock and have verified pte_present().
>  		 */
>  		WARN_ON_ONCE(try_grab_page(page, flags));
> +		*page_mask = (1U << huge_page_order(h)) - 1;
>  	}
>  out:
>  	spin_unlock(ptl);
> -- 
> 2.40.1
> 
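For context: page_mask here is in base-page units, the same convention the
THP paths already use for struct follow_page_context (a huge page of order
N covers (1 << N) base pages, so the mask is (1 << N) - 1, not the
byte-granular huge_page_mask()).  The GUP core uses it to batch the walk
across all base pages of a single huge mapping instead of re-walking the
page tables once per PAGE_SIZE.  Roughly, the consumer in
__get_user_pages() looks like this (paraphrased sketch, not the exact
code):

	struct follow_page_context ctx = { .page_mask = 0 };

	page = follow_page_mask(vma, start, foll_flags, &ctx);
	...
	/* how many base pages of this huge page remain from 'start' */
	page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask);
	if (page_increm > nr_pages)
		page_increm = nr_pages;
	i += page_increm;
	start += page_increm * PAGE_SIZE;
	nr_pages -= page_increm;

So with page_mask filled in, a single hugetlb_follow_page_mask() call can
satisfy a whole huge page worth of the request in one iteration.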