On Wed, Jul 14, 2021 at 12:36 PM Joao Martins <joao.m.martins@xxxxxxxxxx> wrote:
>
> Use try_grab_compound_head() for device-dax GUP when configured with a
> compound pagemap.
>
> Rather than incrementing the refcount for each page, do one atomic
> addition for all the pages to be pinned.
>
> Performance measured by gup_benchmark improves considerably
> get_user_pages_fast() and pin_user_pages_fast() with NVDIMMs:
>
>  $ gup_test -f /dev/dax1.0 -m 16384 -r 10 -S [-u,-a] -n 512 -w
>  (get_user_pages_fast 2M pages) ~59 ms -> ~6.1 ms
>  (pin_user_pages_fast 2M pages) ~87 ms -> ~6.2 ms
>  [altmap]
>  (get_user_pages_fast 2M pages) ~494 ms -> ~9 ms
>  (pin_user_pages_fast 2M pages) ~494 ms -> ~10 ms
>
>  $ gup_test -f /dev/dax1.0 -m 129022 -r 10 -S [-u,-a] -n 512 -w
>  (get_user_pages_fast 2M pages) ~492 ms -> ~49 ms
>  (pin_user_pages_fast 2M pages) ~493 ms -> ~50 ms
>  [altmap with -m 127004]
>  (get_user_pages_fast 2M pages) ~3.91 sec -> ~70 ms
>  (pin_user_pages_fast 2M pages) ~3.97 sec -> ~74 ms
>
> Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
> ---
>  mm/gup.c | 53 +++++++++++++++++++++++++++++++++--------------------
>  1 file changed, 33 insertions(+), 20 deletions(-)
>
> diff --git a/mm/gup.c b/mm/gup.c
> index 42b8b1fa6521..9baaa1c0b7f3 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -2234,31 +2234,55 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
>  }
>  #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
>
> +
> +static int record_subpages(struct page *page, unsigned long addr,
> +                           unsigned long end, struct page **pages)
> +{
> +        int nr;
> +
> +        for (nr = 0; addr != end; addr += PAGE_SIZE)
> +                pages[nr++] = page++;
> +
> +        return nr;
> +}
> +
>  #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
>  static int __gup_device_huge(unsigned long pfn, unsigned long addr,
>                               unsigned long end, unsigned int flags,
>                               struct page **pages, int *nr)
>  {
> -        int nr_start = *nr;
> +        int refs, nr_start = *nr;
>          struct dev_pagemap *pgmap = NULL;
>
>          do {
> -                struct page *page = pfn_to_page(pfn);
> +                struct page *pinned_head, *head, *page = pfn_to_page(pfn);
> +                unsigned long next;
>
>                  pgmap = get_dev_pagemap(pfn, pgmap);
>                  if (unlikely(!pgmap)) {
>                          undo_dev_pagemap(nr, nr_start, flags, pages);
>                          return 0;
>                  }
> -                SetPageReferenced(page);
> -                pages[*nr] = page;
> -                if (unlikely(!try_grab_page(page, flags))) {
> -                        undo_dev_pagemap(nr, nr_start, flags, pages);
> +
> +                head = compound_head(page);
> +                /* @end is assumed to be limited at most one compound page */
> +                next = PageCompound(head) ? end : addr + PAGE_SIZE;

Please no ternary operator for this check, but otherwise this patch
looks good to me.

Reviewed-by: Dan Williams <dan.j.williams@xxxxxxxxx>

> +                refs = record_subpages(page, addr, next, pages + *nr);
> +
> +                SetPageReferenced(head);
> +                pinned_head = try_grab_compound_head(head, refs, flags);
> +                if (!pinned_head) {
> +                        if (PageCompound(head)) {
> +                                ClearPageReferenced(head);
> +                                put_dev_pagemap(pgmap);
> +                        } else {
> +                                undo_dev_pagemap(nr, nr_start, flags, pages);
> +                        }
>                          return 0;
>                  }
> -                (*nr)++;
> -                pfn++;
> -        } while (addr += PAGE_SIZE, addr != end);
> +                *nr += refs;
> +                pfn += refs;
> +        } while (addr += (refs << PAGE_SHIFT), addr != end);
>
>          if (pgmap)
>                  put_dev_pagemap(pgmap);
> @@ -2318,17 +2342,6 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
>  }
>  #endif
>
> -static int record_subpages(struct page *page, unsigned long addr,
> -                           unsigned long end, struct page **pages)
> -{
> -        int nr;
> -
> -        for (nr = 0; addr != end; addr += PAGE_SIZE)
> -                pages[nr++] = page++;
> -
> -        return nr;
> -}
> -
>  #ifdef CONFIG_ARCH_HAS_HUGEPD
>  static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
>                                        unsigned long sz)
> --
> 2.17.1
>
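
For anyone following along, a minimal sketch of the if/else spelling that
the ternary comment above asks for; this is only an illustration of the
review request, not code from the posted patch or from a follow-up
revision:

        /*
         * Hypothetical non-ternary form of:
         *         next = PageCompound(head) ? end : addr + PAGE_SIZE;
         *
         * For a compound page the whole remaining range can be recorded
         * at once (@end is limited to at most one compound page per the
         * comment in the patch); otherwise advance one base page.
         */
        if (PageCompound(head))
                next = end;
        else
                next = addr + PAGE_SIZE;

Semantics are unchanged; the check is just written as an explicit branch.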