From: "Alex Shi (tencent)" <alexs@xxxxxxxxxx> KSM stable tree only store single page, so convert the func users to use folio and save few compound_head calls. Signed-off-by: Alex Shi (tencent) <alexs@xxxxxxxxxx> Cc: Izik Eidus <izik.eidus@xxxxxxxxxxxxxxxxxx> Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx> Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx> Cc: Hugh Dickins <hughd@xxxxxxxxxx> Cc: Chris Wright <chrisw@xxxxxxxxxxxx> --- mm/ksm.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/mm/ksm.c b/mm/ksm.c index 7188997437d3..c2afe2f926db 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -2078,7 +2078,7 @@ static struct page *stable_tree_search(struct page *page) * This function returns the stable tree node just allocated on success, * NULL otherwise. */ -static struct ksm_stable_node *stable_tree_insert(struct page *kpage) +static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio) { int nid; unsigned long kpfn; @@ -2088,7 +2088,7 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage) struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any; bool need_chain = false; - kpfn = page_to_pfn(kpage); + kpfn = folio_pfn(kfolio); nid = get_kpfn_nid(kpfn); root = root_stable_tree + nid; again: @@ -2096,13 +2096,13 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage) new = &root->rb_node; while (*new) { - struct page *tree_page; + struct folio *tree_folio; int ret; cond_resched(); stable_node = rb_entry(*new, struct ksm_stable_node, node); stable_node_any = NULL; - tree_page = chain(&stable_node_dup, stable_node, root); + tree_folio = chain(&stable_node_dup, stable_node, root); if (!stable_node_dup) { /* * Either all stable_node dups were full in @@ -2124,11 +2124,11 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage) * write protected at all times. Any will work * fine to continue the walk. */ - tree_page = get_ksm_page(stable_node_any, - GET_KSM_PAGE_NOLOCK); + tree_folio = ksm_get_folio(stable_node_any, + GET_KSM_PAGE_NOLOCK); } VM_BUG_ON(!stable_node_dup ^ !!stable_node_any); - if (!tree_page) { + if (!tree_folio) { /* * If we walked over a stale stable_node, * get_ksm_page() will call rb_erase() and it @@ -2141,8 +2141,8 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage) goto again; } - ret = memcmp_pages(kpage, tree_page); - put_page(tree_page); + ret = memcmp_pages(&kfolio->page, &tree_folio->page); + folio_put(tree_folio); parent = *new; if (ret < 0) @@ -2161,7 +2161,7 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage) INIT_HLIST_HEAD(&stable_node_dup->hlist); stable_node_dup->kpfn = kpfn; - set_page_stable_node(kpage, stable_node_dup); + folio_set_stable_node(kfolio, stable_node_dup); stable_node_dup->rmap_hlist_len = 0; DO_NUMA(stable_node_dup->nid = nid); if (!need_chain) { @@ -2439,7 +2439,7 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite * node in the stable tree and add both rmap_items. */ lock_page(kpage); - stable_node = stable_tree_insert(kpage); + stable_node = stable_tree_insert(page_folio(kpage)); if (stable_node) { stable_tree_append(tree_rmap_item, stable_node, false); -- 2.43.0