Much like hugetlbfs or THPs, treat device pagemaps with compound pages
like the rest of GUP handling of compound pages.

Rather than incrementing the refcount every 4K, we record all sub pages
and increment by @refs amount *once*.

Performance measured by gup_benchmark improves considerably
get_user_pages_fast() and pin_user_pages_fast() with NVDIMMs:

 $ gup_test -f /dev/dax1.0 -m 16384 -r 10 -S [-u,-a] -n 512 -w
(get_user_pages_fast 2M pages) ~59 ms -> ~6.1 ms
(pin_user_pages_fast 2M pages) ~87 ms -> ~6.2 ms
[altmap]
(get_user_pages_fast 2M pages) ~494 ms -> ~9 ms
(pin_user_pages_fast 2M pages) ~494 ms -> ~10 ms

 $ gup_test -f /dev/dax1.0 -m 129022 -r 10 -S [-u,-a] -n 512 -w
(get_user_pages_fast 2M pages) ~492 ms -> ~49 ms
(pin_user_pages_fast 2M pages) ~493 ms -> ~50 ms
[altmap with -m 127004]
(get_user_pages_fast 2M pages) ~3.91 sec -> ~70 ms
(pin_user_pages_fast 2M pages) ~3.97 sec -> ~74 ms

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
 mm/gup.c | 52 ++++++++++++++++++++++++++++++++--------------------
 1 file changed, 32 insertions(+), 20 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index b3e647c8b7ee..514f12157a0f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2159,31 +2159,54 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 }
 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
 
+
+static int record_subpages(struct page *page, unsigned long addr,
+			   unsigned long end, struct page **pages)
+{
+	int nr;
+
+	for (nr = 0; addr != end; addr += PAGE_SIZE)
+		pages[nr++] = page++;
+
+	return nr;
+}
+
 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 			     unsigned long end, unsigned int flags,
 			     struct page **pages, int *nr)
 {
-	int nr_start = *nr;
+	int refs, nr_start = *nr;
 	struct dev_pagemap *pgmap = NULL;
 
 	do {
-		struct page *page = pfn_to_page(pfn);
+		struct page *head, *page = pfn_to_page(pfn);
+		unsigned long next;
 
 		pgmap = get_dev_pagemap(pfn, pgmap);
 		if (unlikely(!pgmap)) {
 			undo_dev_pagemap(nr, nr_start, flags, pages);
 			return 0;
 		}
-		SetPageReferenced(page);
-		pages[*nr] = page;
-		if (unlikely(!try_grab_page(page, flags))) {
-			undo_dev_pagemap(nr, nr_start, flags, pages);
+
+		head = compound_head(page);
+		next = PageCompound(head) ? end : addr + PAGE_SIZE;
+		refs = record_subpages(page, addr, next, pages + *nr);
+
+		SetPageReferenced(head);
+		/* don't clobber @head: the error path below still needs it */
+		if (!try_grab_compound_head(head, refs, flags)) {
+			if (PageCompound(head)) {
+				ClearPageReferenced(head);
+				put_dev_pagemap(pgmap);
+			} else {
+				undo_dev_pagemap(nr, nr_start, flags, pages);
+			}
 			return 0;
 		}
-		(*nr)++;
-		pfn++;
-	} while (addr += PAGE_SIZE, addr != end);
+		*nr += refs;
+		pfn += refs;
+	} while (addr += (refs << PAGE_SHIFT), addr != end);
 
 	if (pgmap)
 		put_dev_pagemap(pgmap);
@@ -2243,17 +2266,6 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
 }
 #endif
 
-static int record_subpages(struct page *page, unsigned long addr,
-			   unsigned long end, struct page **pages)
-{
-	int nr;
-
-	for (nr = 0; addr != end; addr += PAGE_SIZE)
-		pages[nr++] = page++;
-
-	return nr;
-}
-
 #ifdef CONFIG_ARCH_HAS_HUGEPD
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 				      unsigned long sz)
-- 
2.17.1