Currently try_to_unmap() returns a bool by checking page_mapcount(), but
this may yield a false positive because page_mapcount() doesn't check all
subpages of a compound page.  total_mapcount() could be used instead, but
it is more expensive since it traverses all subpages.

Actually, most callers of try_to_unmap() don't care about the return value
at all.  The callers that do can simply check whether the page is still
mapped with page_mapped(), which bails out early as soon as it finds a
mapped subpage.

So make try_to_unmap() a void function and let callers check page_mapped()
where necessary.

Suggested-by: Hugh Dickins <hughd@xxxxxxxxxx>
Signed-off-by: Yang Shi <shy828301@xxxxxxxxx>
---
(For illustration, a rough sketch of the new caller-side pattern is
appended after the patch.)

 include/linux/rmap.h |  2 +-
 mm/huge_memory.c     |  4 +---
 mm/memory-failure.c  | 13 ++++++-------
 mm/rmap.c            |  6 +-----
 mm/vmscan.c          |  3 ++-
 5 files changed, 11 insertions(+), 17 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index def5c62c93b3..116cb193110a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -194,7 +194,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 int page_referenced(struct page *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_unmap(struct page *, enum ttu_flags flags);
 
 /* Avoid racy checks */
 #define PVMW_SYNC		(1 << 0)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 19195fca1aee..80fe642d742d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2336,15 +2336,13 @@ static void unmap_page(struct page *page)
 {
 	enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK |
 		TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
-	bool unmap_success;
 
 	VM_BUG_ON_PAGE(!PageHead(page), page);
 
 	if (PageAnon(page))
 		ttu_flags |= TTU_SPLIT_FREEZE;
 
-	unmap_success = try_to_unmap(page, ttu_flags);
-	VM_BUG_ON_PAGE(!unmap_success, page);
+	try_to_unmap(page, ttu_flags);
 }
 
 static void remap_page(struct page *page, unsigned int nr)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9dcc9bcea731..6dd53ff34825 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1126,7 +1126,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
 	if (!PageHuge(hpage)) {
-		unmap_success = try_to_unmap(hpage, ttu);
+		try_to_unmap(hpage, ttu);
 	} else {
 		if (!PageAnon(hpage)) {
 			/*
@@ -1138,17 +1138,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 			 */
 			mapping = hugetlb_page_mapping_lock_write(hpage);
 			if (mapping) {
-				unmap_success = try_to_unmap(hpage,
-						     ttu|TTU_RMAP_LOCKED);
+				try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
 				i_mmap_unlock_write(mapping);
-			} else {
+			} else
 				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
-				unmap_success = false;
-			}
 		} else {
-			unmap_success = try_to_unmap(hpage, ttu);
+			try_to_unmap(hpage, ttu);
 		}
 	}
+
+	unmap_success = !page_mapped(hpage);
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
diff --git a/mm/rmap.c b/mm/rmap.c
index a35cbbbded0d..728de421e43a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1748,10 +1748,8 @@ static int page_not_mapped(struct page *page)
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path. Caller must hold the page lock.
- *
- * If unmap is successful, return true. Otherwise, false.
 */
-bool try_to_unmap(struct page *page, enum ttu_flags flags)
+void try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
@@ -1776,8 +1774,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
 		rmap_walk_locked(page, &rwc);
 	else
 		rmap_walk(page, &rwc);
-
-	return !page_mapcount(page) ? true : false;
 }
 
 /**
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f96d62159720..fa5052ace415 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 
-			if (!try_to_unmap(page, flags)) {
+			try_to_unmap(page, flags);
+			if (page_mapped(page)) {
 				stat->nr_unmap_fail += nr_pages;
 				if (!was_swapbacked && PageSwapBacked(page))
 					stat->nr_lazyfree_fail += nr_pages;
-- 
2.26.2
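
For illustration only, a rough sketch of the caller-side pattern this
change relies on.  The helper name below is hypothetical and not part of
the patch; it assumes the caller already holds the page lock, as
try_to_unmap() requires:

	#include <linux/mm.h>
	#include <linux/rmap.h>

	/*
	 * Hypothetical caller: try_to_unmap() no longer reports success,
	 * so a caller that cares re-checks with page_mapped(), which
	 * returns as soon as it finds one mapped subpage instead of
	 * summing the mapcounts of every subpage.
	 */
	static bool unmap_page_checked(struct page *page, enum ttu_flags flags)
	{
		try_to_unmap(page, flags);

		/* Unmap "succeeded" only if no subpage is mapped any more. */
		return !page_mapped(page);
	}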