Replace three calls to compound_head() with one. This removes the last
user of compound_pincount(), so remove that helper too.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 include/linux/mm.h | 21 ++++++++-------------
 1 file changed, 8 insertions(+), 13 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf9624ca61c3..d3769897c8ac 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -934,13 +934,6 @@ static inline int head_compound_pincount(struct page *head)
 	return atomic_read(compound_pincount_ptr(head));
 }
 
-static inline int compound_pincount(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageCompound(page), page);
-	page = compound_head(page);
-	return head_compound_pincount(page);
-}
-
 static inline void set_compound_order(struct page *page, unsigned int order)
 {
 	page[1].compound_order = order;
@@ -1340,18 +1333,20 @@ void unpin_user_pages(struct page **pages, unsigned long npages);
  */
 static inline bool page_maybe_dma_pinned(struct page *page)
 {
-	if (PageCompound(page))
-		return compound_pincount(page) > 0;
+	struct folio *folio = page_folio(page);
+
+	if (folio_test_large(folio))
+		return atomic_read(folio_pincount_ptr(folio)) > 0;
 
 	/*
-	 * page_ref_count() is signed. If that refcount overflows, then
-	 * page_ref_count() returns a negative value, and callers will avoid
+	 * folio_ref_count() is signed. If that refcount overflows, then
+	 * folio_ref_count() returns a negative value, and callers will avoid
 	 * further incrementing the refcount.
 	 *
-	 * Here, for that overflow case, use the signed bit to count a little
+	 * Here, for that overflow case, use the sign bit to count a little
 	 * bit higher via unsigned math, and thus still get an accurate result.
 	 */
-	return ((unsigned int)page_ref_count(compound_head(page))) >=
+	return ((unsigned int)folio_ref_count(folio)) >=
 		GUP_PIN_COUNTING_BIAS;
 }
 
-- 
2.33.0
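
A note for readers following the pinning scheme the small-folio path
relies on: pin_user_pages() (FOLL_PIN) bumps a small folio's refcount by
GUP_PIN_COUNTING_BIAS (1U << 10) per pin, while an ordinary get_page()
adds 1, which is why "refcount >= bias" reads as "maybe pinned" and can
yield false positives once a page accumulates roughly 1024 plain
references. A minimal user-space model of that accounting follows; the
toy_* names and refcount values are illustrative, not kernel code:

/* toy_pin_model.c: illustrative sketch only -- not the kernel
 * implementation. Models how FOLL_PIN bias accounting makes
 * "refcount >= GUP_PIN_COUNTING_BIAS" mean "maybe DMA-pinned",
 * including the documented false-positive behaviour.
 */
#include <stdbool.h>
#include <stdio.h>

#define GUP_PIN_COUNTING_BIAS (1U << 10)	/* same value the kernel uses */

struct toy_page {
	int refcount;				/* models folio_ref_count() */
};

static void toy_get(struct toy_page *p) { p->refcount += 1; }
static void toy_pin(struct toy_page *p) { p->refcount += GUP_PIN_COUNTING_BIAS; }

static bool toy_maybe_dma_pinned(const struct toy_page *p)
{
	return (unsigned int)p->refcount >= GUP_PIN_COUNTING_BIAS;
}

int main(void)
{
	struct toy_page p = { .refcount = 1 };

	toy_get(&p);				/* plain reference: count is 2 */
	printf("after get: %d\n", toy_maybe_dma_pinned(&p));	/* 0 */

	toy_pin(&p);				/* pinned: 2 + 1024 */
	printf("after pin: %d\n", toy_maybe_dma_pinned(&p));	/* 1 */

	for (int i = 0; i < 2000; i++)		/* many plain references... */
		toy_get(&p);
	/* ...would now report 1 even without the pin: a false positive. */
	return 0;
}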
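
The cast in the final comparison matters for the overflow case the
in-code comment describes: an overflowed signed refcount is negative,
and converting it to unsigned int maps it to a value above INT_MAX, so
the check keeps answering "maybe pinned" rather than wrapping to false.
A standalone sketch of just that trick, using a hypothetical overflowed
refcount value:

/* overflow_cast.c: demonstrates the "use the sign bit ... via unsigned
 * math" behaviour; the refcount value below is hypothetical.
 */
#include <limits.h>
#include <stdio.h>

#define GUP_PIN_COUNTING_BIAS (1U << 10)

int main(void)
{
	int overflowed = INT_MIN + 1296;	/* negative: count overflowed */

	/* A naive signed compare calls any negative count "not pinned". */
	printf("signed:      %d\n", overflowed >= (int)GUP_PIN_COUNTING_BIAS);

	/* The cast keeps counting past INT_MAX, so the answer stays 1. */
	printf("unsigned:    %d\n",
	       (unsigned int)overflowed >= GUP_PIN_COUNTING_BIAS);
	printf("as unsigned: %u\n", (unsigned int)overflowed);
	return 0;
}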