From: "Alex Shi (tencent)" <alexs@xxxxxxxxxx> The ksm only contains single pages, so we could add a new func ksm_get_folio for get_ksm_page to use folio instead of pages to save a couple of compound_head calls. After all caller replaced, get_ksm_page will be removed. Signed-off-by: Alex Shi (tencent) <alexs@xxxxxxxxxx> Cc: Izik Eidus <izik.eidus@xxxxxxxxxxxxxxxxxx> Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx> Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx> Cc: Hugh Dickins <hughd@xxxxxxxxxx> Cc: Chris Wright <chrisw@xxxxxxxxxxxx> --- mm/ksm.c | 40 ++++++++++++++++++++++++---------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 8c001819cf10..ac080235b002 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -915,10 +915,10 @@ enum get_ksm_page_flags { * a page to put something that might look like our key in page->mapping. * is on its way to being freed; but it is an anomaly to bear in mind. */ -static struct page *get_ksm_page(struct ksm_stable_node *stable_node, +static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node, enum get_ksm_page_flags flags) { - struct page *page; + struct folio *folio; void *expected_mapping; unsigned long kpfn; @@ -926,8 +926,8 @@ static struct page *get_ksm_page(struct ksm_stable_node *stable_node, PAGE_MAPPING_KSM); again: kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */ - page = pfn_to_page(kpfn); - if (READ_ONCE(page->mapping) != expected_mapping) + folio = pfn_folio(kpfn); + if (READ_ONCE(folio->mapping) != expected_mapping) goto stale; /* @@ -940,41 +940,41 @@ static struct page *get_ksm_page(struct ksm_stable_node *stable_node, * in folio_migrate_mapping(), it might still be our page, * in which case it's essential to keep the node. */ - while (!get_page_unless_zero(page)) { + while (!folio_try_get(folio)) { /* * Another check for page->mapping != expected_mapping would * work here too. We have chosen the !PageSwapCache test to * optimize the common case, when the page is or is about to * be freed: PageSwapCache is cleared (under spin_lock_irq) * in the ref_freeze section of __remove_mapping(); but Anon - * page->mapping reset to NULL later, in free_pages_prepare(). + * folio->mapping reset to NULL later, in free_pages_prepare(). */ - if (!PageSwapCache(page)) + if (!folio_test_swapcache(folio)) goto stale; cpu_relax(); } - if (READ_ONCE(page->mapping) != expected_mapping) { - put_page(page); + if (READ_ONCE(folio->mapping) != expected_mapping) { + folio_put(folio); goto stale; } if (flags == GET_KSM_PAGE_TRYLOCK) { - if (!trylock_page(page)) { - put_page(page); + if (!folio_trylock(folio)) { + folio_put(folio); return ERR_PTR(-EBUSY); } } else if (flags == GET_KSM_PAGE_LOCK) - lock_page(page); + folio_lock(folio); if (flags != GET_KSM_PAGE_NOLOCK) { - if (READ_ONCE(page->mapping) != expected_mapping) { - unlock_page(page); - put_page(page); + if (READ_ONCE(folio->mapping) != expected_mapping) { + folio_unlock(folio); + folio_put(folio); goto stale; } } - return page; + return folio; stale: /* @@ -990,6 +990,14 @@ static struct page *get_ksm_page(struct ksm_stable_node *stable_node, return NULL; } +static struct page *get_ksm_page(struct ksm_stable_node *stable_node, + enum get_ksm_page_flags flags) +{ + struct folio *folio = ksm_get_folio(stable_node, flags); + + return &folio->page; +} + /* * Removing rmap_item from stable or unstable tree. * This function will clean the information from the stable/unstable tree. -- 2.43.0