On Thu, Mar 25, 2021 at 4:10 PM Joao Martins <joao.m.martins@xxxxxxxxxx> wrote:
>
> Much like hugetlbfs or THPs, treat device pagemaps with
> compound pages like the rest of GUP handling of compound pages.
>

How about: "Use try_grab_compound_head() for device-dax GUP when
configured with a compound pagemap."

> Rather than incrementing the refcount every 4K, we record
> all sub pages and increment by @refs amount *once*.

"Rather than incrementing the refcount for each page, do one atomic
addition for all the pages to be pinned."

>
> Performance measured by gup_benchmark improves considerably
> get_user_pages_fast() and pin_user_pages_fast() with NVDIMMs:
>
> $ gup_test -f /dev/dax1.0 -m 16384 -r 10 -S [-u,-a] -n 512 -w
> (get_user_pages_fast 2M pages) ~59 ms -> ~6.1 ms
> (pin_user_pages_fast 2M pages) ~87 ms -> ~6.2 ms
> [altmap]
> (get_user_pages_fast 2M pages) ~494 ms -> ~9 ms
> (pin_user_pages_fast 2M pages) ~494 ms -> ~10 ms

Hmm what is altmap representing here? The altmap case does not support
compound geometry, so this last test is comparing pinning this amount
of memory without compound pages where the memmap is in PMEM to the
speed *with* compound pages and the memmap in DRAM?

>
> $ gup_test -f /dev/dax1.0 -m 129022 -r 10 -S [-u,-a] -n 512 -w
> (get_user_pages_fast 2M pages) ~492 ms -> ~49 ms
> (pin_user_pages_fast 2M pages) ~493 ms -> ~50 ms
> [altmap with -m 127004]
> (get_user_pages_fast 2M pages) ~3.91 sec -> ~70 ms
> (pin_user_pages_fast 2M pages) ~3.97 sec -> ~74 ms
>
> Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
> ---
>  mm/gup.c | 52 ++++++++++++++++++++++++++++++++--------------------
>  1 file changed, 32 insertions(+), 20 deletions(-)
>
> diff --git a/mm/gup.c b/mm/gup.c
> index b3e647c8b7ee..514f12157a0f 100644
> --- a/mm/gup.c
> +++ b/mm/gup.c
> @@ -2159,31 +2159,54 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
>  }
>  #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
>
> +
> +static int record_subpages(struct page *page, unsigned long addr,
> +			   unsigned long end, struct page **pages)
> +{
> +	int nr;
> +
> +	for (nr = 0; addr != end; addr += PAGE_SIZE)
> +		pages[nr++] = page++;
> +
> +	return nr;
> +}
> +
>  #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
>  static int __gup_device_huge(unsigned long pfn, unsigned long addr,
>  			     unsigned long end, unsigned int flags,
>  			     struct page **pages, int *nr)
>  {
> -	int nr_start = *nr;
> +	int refs, nr_start = *nr;
>  	struct dev_pagemap *pgmap = NULL;
>
>  	do {
> -		struct page *page = pfn_to_page(pfn);
> +		struct page *head, *page = pfn_to_page(pfn);
> +		unsigned long next;
>
>  		pgmap = get_dev_pagemap(pfn, pgmap);
>  		if (unlikely(!pgmap)) {
>  			undo_dev_pagemap(nr, nr_start, flags, pages);
>  			return 0;
>  		}
> -		SetPageReferenced(page);
> -		pages[*nr] = page;
> -		if (unlikely(!try_grab_page(page, flags))) {
> -			undo_dev_pagemap(nr, nr_start, flags, pages);
> +
> +		head = compound_head(page);
> +		next = PageCompound(head) ? end : addr + PAGE_SIZE;

This looks a tad messy, and makes assumptions that upper layers are not
sending this routine multiple huge pages to map. next should be set to
the next compound page, not end.

> +		refs = record_subpages(page, addr, next, pages + *nr);
> +
> +		SetPageReferenced(head);
> +		head = try_grab_compound_head(head, refs, flags);
> +		if (!head) {
> +			if (PageCompound(head)) {

@head is NULL here, I think you wanted to rename the result of
try_grab_compound_head() to something like pinned_head so that you
don't undo the work you did above.
However I feel like there's one too many PageCompound() checks; see the
sketch at the end of this mail for what I had in mind.

> +				ClearPageReferenced(head);
> +				put_dev_pagemap(pgmap);
> +			} else {
> +				undo_dev_pagemap(nr, nr_start, flags, pages);
> +			}
>  			return 0;
>  		}
> -		(*nr)++;
> -		pfn++;
> -	} while (addr += PAGE_SIZE, addr != end);
> +		*nr += refs;
> +		pfn += refs;
> +	} while (addr += (refs << PAGE_SHIFT), addr != end);
>
>  	if (pgmap)
>  		put_dev_pagemap(pgmap);
> @@ -2243,17 +2266,6 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
>  }
>  #endif
>
> -static int record_subpages(struct page *page, unsigned long addr,
> -			   unsigned long end, struct page **pages)
> -{
> -	int nr;
> -
> -	for (nr = 0; addr != end; addr += PAGE_SIZE)
> -		pages[nr++] = page++;
> -
> -	return nr;
> -}
> -
>  #ifdef CONFIG_ARCH_HAS_HUGEPD
>  static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
>  				      unsigned long sz)
> --
> 2.17.1
>
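
To make the two comments above concrete, here is a rough, untested
sketch of the loop shape I have in mind; the compound_nr() arithmetic
for computing @next, the @pinned_head name, and the simplified error
path are only meant to illustrate the idea, not to be copied verbatim:

	do {
		struct page *head, *pinned_head, *page = pfn_to_page(pfn);
		unsigned long next;

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, flags, pages);
			return 0;
		}

		head = compound_head(page);
		/*
		 * Advance to the next compound page boundary (or @end),
		 * rather than assuming one huge page spans [addr, end).
		 * For a non-compound page this is just addr + PAGE_SIZE.
		 */
		next = min(end, addr + ((compound_nr(head) - (page - head))
					<< PAGE_SHIFT));
		refs = record_subpages(page, addr, next, pages + *nr);

		SetPageReferenced(head);
		pinned_head = try_grab_compound_head(head, refs, flags);
		if (!pinned_head) {
			/* @head is still valid here, only the grab failed */
			ClearPageReferenced(head);
			undo_dev_pagemap(nr, nr_start, flags, pages);
			return 0;
		}
		*nr += refs;
		pfn += refs;
	} while (addr += (refs << PAGE_SHIFT), addr != end);

That way @next always lands on a compound page boundary (or @end), and
the result of try_grab_compound_head() no longer clobbers @head, which
is what lets the extra PageCompound() check in the error path go away.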