On Sun, Apr 07, 2024 at 05:12:42PM +0800, Muchun Song wrote:
>
>
> On 2024/4/2 04:26, Vishal Moola (Oracle) wrote:
> > hugetlb_wp() can use the struct vm_fault passed in from hugetlb_fault().
> > This alleviates the stack by consolidating 5 variables into a single
> > struct.
> >
> > Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
> > ---
> >   mm/hugetlb.c | 61 ++++++++++++++++++++++++++--------------------------
> >   1 file changed, 30 insertions(+), 31 deletions(-)
> >
> > diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> > index aca2f11b4138..d4f26947173e 100644
> > --- a/mm/hugetlb.c
> > +++ b/mm/hugetlb.c
> > @@ -5918,18 +5918,16 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
> >   * Keep the pte_same checks anyway to make transition from the mutex easier.
> >   */
> >  static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
> > -		unsigned long address, pte_t *ptep, unsigned int flags,
> > -		struct folio *pagecache_folio, spinlock_t *ptl,
> > +		struct folio *pagecache_folio,
>
> The same as comment in the previous thread.

Andrew, could you please fold the attached patch in here as well?
From f4adcf13ecc15a6733af43649756e53457078221 Mon Sep 17 00:00:00 2001
From: "Vishal Moola (Oracle)" <vishal.moola@xxxxxxxxx>
Date: Mon, 8 Apr 2024 10:21:44 -0700
Subject: [PATCH 2/2] hugetlb: Simplify hugetlb_wp() arguments

Simplify the arguments of hugetlb_wp(), as suggested by Oscar and Muchun.

Suggested-by: Muchun Song <muchun.song@xxxxxxxxx>
Suggested-by: Oscar Salvador <osalvador@xxxxxxx>
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@xxxxxxxxx>
---
 mm/hugetlb.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 05fe610f4699..0d96a41efde8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5915,10 +5915,11 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  * cannot race with other handlers or page migration.
  * Keep the pte_same checks anyway to make transition from the mutex easier.
  */
-static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
-		struct folio *pagecache_folio,
+static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
 		struct vm_fault *vmf)
 {
+	struct vm_area_struct *vma = vmf->vma;
+	struct mm_struct *mm = vma->vm_mm;
 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
 	pte_t pte = huge_ptep_get(vmf->pte);
 	struct hstate *h = hstate_vma(vma);
@@ -6364,7 +6365,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping,
 	hugetlb_count_add(pages_per_huge_page(h), mm);
 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
 		/* Optimization, do the COW without a second fault */
-		ret = hugetlb_wp(mm, vma, folio, vmf);
+		ret = hugetlb_wp(folio, vmf);
 	}

 	spin_unlock(vmf->ptl);
@@ -6577,7 +6578,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,

 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
 		if (!huge_pte_write(vmf.orig_pte)) {
-			ret = hugetlb_wp(mm, vma, pagecache_folio, &vmf);
+			ret = hugetlb_wp(pagecache_folio, &vmf);
 			goto out_put_page;
 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
 			vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
-- 
2.43.0
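
For readers less familiar with mm/ internals, here is a small, self-contained sketch of the pattern the patch applies: instead of passing several related values (mm, vma, address, flags) as separate parameters, the caller hands down one fault-context struct and the callee derives what it needs from it. The names below (fault_ctx, vma_ctx, mm_ctx, handle_wp_old/handle_wp_new) are simplified stand-ins invented for illustration; they are not the kernel's struct vm_fault, vm_area_struct, or hugetlb_wp().

/*
 * Standalone userspace sketch of the argument-consolidation pattern.
 * Build with: cc -Wall -o sketch sketch.c
 */
#include <stdio.h>

struct mm_ctx  { int id; };                 /* stand-in for struct mm_struct */
struct vma_ctx { struct mm_ctx *vm_mm; };   /* stand-in for struct vm_area_struct */

struct fault_ctx {                          /* stand-in for struct vm_fault */
	struct vma_ctx *vma;
	unsigned long address;
	unsigned int flags;
};

/* Before: several loosely related parameters spread across the stack. */
static int handle_wp_old(struct mm_ctx *mm, struct vma_ctx *vma,
			 unsigned long address, unsigned int flags)
{
	printf("old: mm=%d addr=%#lx flags=%#x\n", mm->id, address, flags);
	return 0;
}

/* After: one context pointer; mm and vma are derived inside the callee. */
static int handle_wp_new(struct fault_ctx *fc)
{
	struct vma_ctx *vma = fc->vma;
	struct mm_ctx *mm = vma->vm_mm;

	printf("new: mm=%d addr=%#lx flags=%#x\n", mm->id, fc->address, fc->flags);
	return 0;
}

int main(void)
{
	struct mm_ctx mm = { .id = 1 };
	struct vma_ctx vma = { .vm_mm = &mm };
	struct fault_ctx fc = { .vma = &vma, .address = 0x1000, .flags = 0x2 };

	handle_wp_old(&mm, &vma, fc.address, fc.flags);
	handle_wp_new(&fc);
	return 0;
}

The kernel case follows the same shape: struct vm_fault already carries the vma (and through it the mm), so the extra parameters to hugetlb_wp() were redundant copies on the stack, which is what the cover text means by "consolidating 5 variables into a single struct".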