Use try_grab_compound_head() for device-dax GUP when configured with a
compound devmap.

Rather than incrementing the refcount for each page, do one atomic
addition for all the pages to be pinned.

Performance measured by gup_benchmark improves considerably for
get_user_pages_fast() and pin_user_pages_fast() with NVDIMMs:

 $ gup_test -f /dev/dax1.0 -m 16384 -r 10 -S [-u,-a] -n 512 -w
(get_user_pages_fast 2M pages) ~59 ms -> ~6.1 ms
(pin_user_pages_fast 2M pages) ~87 ms -> ~6.2 ms
[altmap]
(get_user_pages_fast 2M pages) ~494 ms -> ~9 ms
(pin_user_pages_fast 2M pages) ~494 ms -> ~10 ms

 $ gup_test -f /dev/dax1.0 -m 129022 -r 10 -S [-u,-a] -n 512 -w
(get_user_pages_fast 2M pages) ~492 ms -> ~49 ms
(pin_user_pages_fast 2M pages) ~493 ms -> ~50 ms
[altmap with -m 127004]
(get_user_pages_fast 2M pages) ~3.91 sec -> ~70 ms
(pin_user_pages_fast 2M pages) ~3.97 sec -> ~74 ms

Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
Reviewed-by: Dan Williams <dan.j.williams@xxxxxxxxx>
---
 mm/gup.c | 51 +++++++++++++++++++++++++++++++--------------------
 1 file changed, 31 insertions(+), 20 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index 7a406d79bd2e..0741d2c0ba5e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2234,17 +2234,30 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 }
 #endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
 
+
+static int record_subpages(struct page *page, unsigned long addr,
+                           unsigned long end, struct page **pages)
+{
+        int nr;
+
+        for (nr = 0; addr != end; addr += PAGE_SIZE)
+                pages[nr++] = page++;
+
+        return nr;
+}
+
 #if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
 static int __gup_device_huge(unsigned long pfn, unsigned long addr,
                              unsigned long end, unsigned int flags,
                              struct page **pages, int *nr)
 {
-        int nr_start = *nr;
+        int refs, nr_start = *nr;
         struct dev_pagemap *pgmap = NULL;
         int ret = 1;
 
         do {
-                struct page *page = pfn_to_page(pfn);
+                struct page *head, *page = pfn_to_page(pfn);
+                unsigned long next = addr + PAGE_SIZE;
 
                 pgmap = get_dev_pagemap(pfn, pgmap);
                 if (unlikely(!pgmap)) {
@@ -2252,16 +2265,25 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
                         ret = 0;
                         break;
                 }
-                SetPageReferenced(page);
-                pages[*nr] = page;
-                if (unlikely(!try_grab_page(page, flags))) {
-                        undo_dev_pagemap(nr, nr_start, flags, pages);
+
+                head = compound_head(page);
+                /* @end is assumed to be limited at most one compound page */
+                if (PageHead(head))
+                        next = end;
+                refs = record_subpages(page, addr, next, pages + *nr);
+
+                SetPageReferenced(head);
+                if (unlikely(!try_grab_compound_head(head, refs, flags))) {
+                        if (PageHead(head))
+                                ClearPageReferenced(head);
+                        else
+                                undo_dev_pagemap(nr, nr_start, flags, pages);
                         ret = 0;
                         break;
                 }
-                (*nr)++;
-                pfn++;
-        } while (addr += PAGE_SIZE, addr != end);
+                *nr += refs;
+                pfn += refs;
+        } while (addr += (refs << PAGE_SHIFT), addr != end);
 
         put_dev_pagemap(pgmap);
         return ret;
@@ -2320,17 +2342,6 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
 }
 #endif
 
-static int record_subpages(struct page *page, unsigned long addr,
-                           unsigned long end, struct page **pages)
-{
-        int nr;
-
-        for (nr = 0; addr != end; addr += PAGE_SIZE)
-                pages[nr++] = page++;
-
-        return nr;
-}
-
 #ifdef CONFIG_ARCH_HAS_HUGEPD
 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                       unsigned long sz)
-- 
2.17.1
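
P.S.: for readers less familiar with the refcounting change, the small
userspace program below contrasts the two schemes the commit message
describes: one atomic increment per 4K subpage versus a single atomic
addition of refs on the compound head. It is only a sketch of the idea,
not kernel code; the names toy_page, grab_each_subpage and
grab_compound_head_once are made up for illustration and do not exist
in mm/gup.c.

/*
 * Standalone illustration (not kernel code) of batching the refcount
 * update: one atomic RMW per subpage vs. a single atomic addition of
 * 'refs' on the head. Build: cc -std=c11 -o refs_sketch refs_sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-in for a compound head page's reference count. */
struct toy_page {
        atomic_int refcount;
};

/* Old scheme: one atomic operation per 4K subpage being pinned. */
static void grab_each_subpage(struct toy_page *head, int nr_subpages)
{
        for (int i = 0; i < nr_subpages; i++)
                atomic_fetch_add(&head->refcount, 1);
}

/* New scheme: a single atomic addition covering all subpages at once. */
static void grab_compound_head_once(struct toy_page *head, int refs)
{
        atomic_fetch_add(&head->refcount, refs);
}

int main(void)
{
        struct toy_page head = { .refcount = 1 };
        int refs = 512;     /* 4K subpages in a 2M compound page */

        grab_each_subpage(&head, refs);       /* 512 atomic RMWs */
        grab_compound_head_once(&head, refs); /*   1 atomic RMW  */

        printf("final refcount: %d\n", atomic_load(&head.refcount));
        return 0;
}

The benchmark deltas above come from cutting the per-subpage atomic
traffic in exactly this way for each 2M mapping handled by
__gup_device_huge().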