FOLL_LONGTERM indicates a pin that will be handed to hardware and cannot
be migrated.  Such a pin would shrink the CMA area permanently, so CMA
pages should be excluded from it.

FOLL_LONGTERM is already checked in the slow path, but not in the fast
path, so a CMA page can still leak into a long-term pin through that
gap.  Close it by adding the check to try_get_compound_head() in the
fast path.

A note about the check: all subpages of a huge page share the same
migrate type, since they are allocated either from a single free_list[]
or by alloc_contig_range() with MIGRATE_MOVABLE.  So it is enough to
check a single subpage with is_migrate_cma_page(subpage).

Signed-off-by: Pingfan Liu <kernelfans@xxxxxxxxx>
Cc: Ira Weiny <ira.weiny@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@xxxxxxxxxxxxx>
Cc: Keith Busch <keith.busch@xxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxxxxxxxxx>
Cc: Shuah Khan <shuah@xxxxxxxxxx>
To: linux-mm@xxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
 mm/gup.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/mm/gup.c b/mm/gup.c
index cd8075e..f0d6804 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -33,9 +33,21 @@ struct follow_page_context {
  * Return the compound head page with ref appropriately incremented,
  * or NULL if that failed.
  */
-static inline struct page *try_get_compound_head(struct page *page, int refs)
+static inline struct page *try_get_compound_head(struct page *page, int refs,
+		unsigned int flags)
 {
-	struct page *head = compound_head(page);
+	struct page *head;
+
+	/*
+	 * Huge page's subpages have the same migrate type due to either
+	 * allocation from a free_list[] or alloc_contig_range() with param
+	 * MIGRATE_MOVABLE. So it is enough to check on a single subpage.
+	 */
+	if (unlikely(flags & FOLL_LONGTERM) &&
+		is_migrate_cma_page(page))
+		return NULL;
+
+	head = compound_head(page);
 	if (WARN_ON_ONCE(page_ref_count(head) < 0))
 		return NULL;
@@ -1908,7 +1920,7 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
 		page = pte_page(pte);
-		head = try_get_compound_head(page, 1);
+		head = try_get_compound_head(page, 1, flags);
 		if (!head)
 			goto pte_unmap;
@@ -2083,7 +2095,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
 	refs = record_subpages(page, addr, end, pages + *nr);
-	head = try_get_compound_head(head, refs);
+	head = try_get_compound_head(head, refs, flags);
 	if (!head)
 		return 0;
@@ -2142,7 +2154,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
 	page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 	refs = record_subpages(page, addr, end, pages + *nr);
-	head = try_get_compound_head(pmd_page(orig), refs);
+	head = try_get_compound_head(pmd_page(orig), refs, flags);
 	if (!head)
 		return 0;
@@ -2174,7 +2186,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
 	page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 	refs = record_subpages(page, addr, end, pages + *nr);
-	head = try_get_compound_head(pud_page(orig), refs);
+	head = try_get_compound_head(pud_page(orig), refs, flags);
 	if (!head)
 		return 0;
@@ -2203,7 +2215,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
 	page = pgd_page(orig) + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
 	refs = record_subpages(page, addr, end, pages + *nr);
-	head = try_get_compound_head(pgd_page(orig), refs);
+	head = try_get_compound_head(pgd_page(orig), refs, flags);
 	if (!head)
 		return 0;
-- 
2.7.5
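
Note (illustration only, not part of the patch): below is a minimal sketch of
the kind of caller this check guards against, a driver that takes a long-term
pin on a user page via get_user_pages_fast() with FOLL_LONGTERM, i.e. the
fast-path route that previously skipped the CMA check.  The function names
(pin_user_buffer_for_dma, unpin_user_buffer) are hypothetical.  With the patch
applied, a CMA page makes try_get_compound_head() return NULL, so gup can fall
back to the slow path, where long-term pins of CMA pages are already handled.

    #include <linux/mm.h>

    /*
     * Hypothetical caller, for illustration only: take a long-term pin on
     * one writable user page through the gup fast path.
     */
    static int pin_user_buffer_for_dma(unsigned long uaddr, struct page **page)
    {
        int ret;

        ret = get_user_pages_fast(uaddr, 1, FOLL_WRITE | FOLL_LONGTERM, page);
        if (ret < 1)
            return ret < 0 ? ret : -EFAULT;

        /* ... map *page for DMA and keep it pinned until the device is done ... */
        return 0;
    }

    static void unpin_user_buffer(struct page *page)
    {
        put_page(page);
    }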