In preparation for allocating frozen pages, stop initialising the page
refcount in post_alloc_hook().

Reviewed-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/compaction.c | 2 ++
 mm/internal.h   | 3 +--
 mm/page_alloc.c | 3 ++-
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 6009f5d1021a..2915a13b34a5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -83,6 +83,7 @@ static inline bool is_via_compact_memory(int order) { return false; }
 static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags)
 {
 	post_alloc_hook(page, order, __GFP_MOVABLE);
+	set_page_refcounted(page);
 	return page;
 }
 #define mark_allocated(...)	alloc_hooks(mark_allocated_noprof(__VA_ARGS__))
@@ -1868,6 +1869,7 @@ static struct folio *compaction_alloc_noprof(struct folio *src, unsigned long da
 	dst = (struct folio *)freepage;
 
 	post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
+	set_page_refcounted(&dst->page);
 	if (order)
 		prep_compound_page(&dst->page, order);
 	cc->nr_freepages -= 1 << order;
diff --git a/mm/internal.h b/mm/internal.h
index ca400c70199c..9cc5fdc614cf 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -735,8 +735,7 @@ static inline void prep_compound_tail(struct page *head, int tail_idx)
 
 extern void prep_compound_page(struct page *page, unsigned int order);
 
-extern void post_alloc_hook(struct page *page, unsigned int order,
-		gfp_t gfp_flags);
+void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags);
 extern bool free_pages_prepare(struct page *page, unsigned int order);
 
 extern int user_min_free_kbytes;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index adac485e3254..e3a4aaf437f9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1506,7 +1506,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
 	int i;
 
 	set_page_private(page, 0);
-	set_page_refcounted(page);
 
 	arch_alloc_page(page, order);
 	debug_pagealloc_map_pages(page, 1 << order);
@@ -1562,6 +1561,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
 			  unsigned int alloc_flags)
 {
 	post_alloc_hook(page, order, gfp_flags);
+	set_page_refcounted(page);
 
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
@@ -6394,6 +6394,7 @@ static void split_free_pages(struct list_head *list)
 		int i;
 
 		post_alloc_hook(page, order, __GFP_MOVABLE);
+		set_page_refcounted(page);
 
 		if (!order)
 			continue;
 
-- 
2.45.2
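
[Editorial note, not part of the patch] A minimal sketch of the calling
convention after this change: post_alloc_hook() no longer touches the
refcount, so a caller that wants an ordinary refcounted page must call
set_page_refcounted() itself, as the call sites above now do, while a
future frozen-page allocator can simply omit that call. The helper name
alloc_movable_page_example below is hypothetical and exists only to
show the pattern; it assumes the mm/internal.h context of this patch.

	/*
	 * Illustrative sketch only: with post_alloc_hook() leaving the
	 * refcount at zero, raising it is now the caller's job.
	 */
	static struct page *alloc_movable_page_example(struct page *page,
						       unsigned int order)
	{
		post_alloc_hook(page, order, __GFP_MOVABLE);
		set_page_refcounted(page);	/* caller's responsibility now */
		if (order)
			prep_compound_page(page, order);
		return page;
	}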