FOLL_LONGTERM is checked in the slow path, __gup_longterm_unlocked(),
but not in the fast path, so a CMA page can slip through that gap and
end up long-term pinned. Place a check in the fast path as well: release
any CMA pages it has already pinned and let the rest of the range fall
back to the slow path.

Signed-off-by: Pingfan Liu <kernelfans@xxxxxxxxx>
Cc: Ira Weiny <ira.weiny@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Mike Rapoport <rppt@xxxxxxxxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@xxxxxxxxxxxxx>
Cc: Keith Busch <keith.busch@xxxxxxxxx>
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
 mm/gup.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/mm/gup.c b/mm/gup.c
index f173fcb..6fe2feb 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2196,6 +2196,29 @@ static int __gup_longterm_unlocked(unsigned long start, int nr_pages,
 	return ret;
 }
 
+#if defined(CONFIG_CMA)
+static inline int reject_cma_pages(int nr_pinned, unsigned int gup_flags,
+	struct page **pages)
+{
+	if (unlikely(gup_flags & FOLL_LONGTERM)) {
+		int i = 0;
+
+		for (i = 0; i < nr_pinned; i++)
+			if (is_migrate_cma_page(pages[i])) {
+				put_user_pages(pages + i, nr_pinned - i);
+				return i;
+			}
+	}
+	return nr_pinned;
+}
+#else
+static inline int reject_cma_pages(int nr_pinned, unsigned int gup_flags,
+	struct page **pages)
+{
+	return nr_pinned;
+}
+#endif
+
 /**
  * get_user_pages_fast() - pin user pages in memory
  * @start: starting user address
@@ -2236,6 +2259,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
 		ret = nr;
 	}
 
+	nr = reject_cma_pages(nr, gup_flags, pages);
 	if (nr < nr_pages) {
 		/* Try to get the remaining pages with get_user_pages */
 		start += nr << PAGE_SHIFT;
-- 
2.7.5
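
Not part of the patch, for illustration only: a minimal sketch of the
kind of caller the new check protects. The function name and error
handling below are hypothetical; a subsystem that long-term pins user
memory (RDMA-style registration, for example) reaches the fast path
roughly like this:

	/*
	 * Hypothetical caller, illustration only. Before this patch,
	 * get_user_pages_fast() could return CMA pages here despite
	 * FOLL_LONGTERM; with reject_cma_pages() in place, such pages
	 * are released and re-acquired through the slow path instead.
	 */
	static int example_pin_user_buf(unsigned long start, int nr_pages,
					struct page **pages)
	{
		int nr;

		nr = get_user_pages_fast(start, nr_pages,
					 FOLL_WRITE | FOLL_LONGTERM, pages);
		if (nr <= 0)
			return nr ? nr : -EFAULT;

		/* ... use pages[0..nr-1]; put_user_pages() when done ... */
		return nr;
	}

Because reject_cma_pages() returns the index of the first CMA page it
finds, nr drops below nr_pages and get_user_pages_fast() retries the
remaining range through __gup_longterm_unlocked(), the slow path that
already honors FOLL_LONGTERM.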