The patch titled
     Subject: xarray: use kmem_cache_alloc_lru to allocate xa_node
has been added to the -mm tree.  Its filename is
     xarray-use-kmem_cache_alloc_lru-to-allocate-xa_node.patch

This patch should soon appear at
    https://ozlabs.org/~akpm/mmots/broken-out/xarray-use-kmem_cache_alloc_lru-to-allocate-xa_node.patch
and later at
    https://ozlabs.org/~akpm/mmotm/broken-out/xarray-use-kmem_cache_alloc_lru-to-allocate-xa_node.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Subject: xarray: use kmem_cache_alloc_lru to allocate xa_node

The workingset code adds xa_nodes to the shadow_nodes list_lru, so the
allocation of xa_node should be done with kmem_cache_alloc_lru().  Use
xas_set_lru() to pass the list_lru into which we want to insert the
xa_node, so that the reclaim context for the xa_node is set up
correctly.

Link: https://lkml.kernel.org/r/20220228122126.37293-9-songmuchun@xxxxxxxxxxxxx
Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
Acked-by: Johannes Weiner <hannes@xxxxxxxxxxx>
Acked-by: Roman Gushchin <roman.gushchin@xxxxxxxxx>
Cc: Alex Shi <alexs@xxxxxxxxxx>
Cc: Anna Schumaker <Anna.Schumaker@xxxxxxxxxx>
Cc: Chao Yu <chao@xxxxxxxxxx>
Cc: Dave Chinner <david@xxxxxxxxxxxxx>
Cc: Fam Zheng <fam.zheng@xxxxxxxxxxxxx>
Cc: Jaegeuk Kim <jaegeuk@xxxxxxxxxx>
Cc: Kari Argillander <kari.argillander@xxxxxxxxx>
Cc: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
Cc: Michal Hocko <mhocko@xxxxxxxxxx>
Cc: Qi Zheng <zhengqi.arch@xxxxxxxxxxxxx>
Cc: Shakeel Butt <shakeelb@xxxxxxxxxx>
Cc: Theodore Ts'o <tytso@xxxxxxx>
Cc: Trond Myklebust <trond.myklebust@xxxxxxxxxxxxxxx>
Cc: Vladimir Davydov <vdavydov.dev@xxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Wei Yang <richard.weiyang@xxxxxxxxx>
Cc: Xiongchun Duan <duanxiongchun@xxxxxxxxxxxxx>
Cc: Yang Shi <shy828301@xxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---
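
For reference, the page cache picks this up via mapping_set_update().
A minimal sketch of that flow, modelled loosely on __filemap_add_folio()
("mapping", "index" and "folio" stand in for the caller's state; this is
illustration only, not part of the patch):

	XA_STATE(xas, &mapping->i_pages, index);

	/*
	 * For non-DAX, non-shmem mappings this now also calls
	 * xas_set_lru(&xas, &shadow_nodes), so every xa_node
	 * allocated on this walk comes from kmem_cache_alloc_lru().
	 */
	mapping_set_update(&xas, mapping);

	do {
		xas_lock_irq(&xas);
		xas_store(&xas, folio);		/* may allocate xa_nodes */
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, GFP_KERNEL));	/* preallocation honours xas.xa_lru too */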

 include/linux/swap.h   |    5 ++++-
 include/linux/xarray.h |    9 ++++++++-
 lib/xarray.c           |   10 +++++-----
 mm/workingset.c        |    2 +-
 4 files changed, 18 insertions(+), 8 deletions(-)

--- a/include/linux/swap.h~xarray-use-kmem_cache_alloc_lru-to-allocate-xa_node
+++ a/include/linux/swap.h
@@ -334,9 +334,12 @@ void workingset_activation(struct folio
 
 /* Only track the nodes of mappings with shadow entries */
 void workingset_update_node(struct xa_node *node);
+extern struct list_lru shadow_nodes;
 #define mapping_set_update(xas, mapping) do {				\
-	if (!dax_mapping(mapping) && !shmem_mapping(mapping))		\
+	if (!dax_mapping(mapping) && !shmem_mapping(mapping)) {		\
 		xas_set_update(xas, workingset_update_node);		\
+		xas_set_lru(xas, &shadow_nodes);			\
+	}								\
 } while (0)
 
 /* linux/mm/page_alloc.c */
--- a/include/linux/xarray.h~xarray-use-kmem_cache_alloc_lru-to-allocate-xa_node
+++ a/include/linux/xarray.h
@@ -1317,6 +1317,7 @@ struct xa_state {
 	struct xa_node *xa_node;
 	struct xa_node *xa_alloc;
 	xa_update_node_t xa_update;
+	struct list_lru *xa_lru;
 };
 
 /*
@@ -1336,7 +1337,8 @@ struct xa_state {
 	.xa_pad = 0,					\
 	.xa_node = XAS_RESTART,				\
 	.xa_alloc = NULL,				\
-	.xa_update = NULL				\
+	.xa_update = NULL,				\
+	.xa_lru = NULL,					\
 }
 
 /**
@@ -1631,6 +1633,11 @@ static inline void xas_set_update(struct
 	xas->xa_update = update;
 }
 
+static inline void xas_set_lru(struct xa_state *xas, struct list_lru *lru)
+{
+	xas->xa_lru = lru;
+}
+
 /**
  * xas_next_entry() - Advance iterator to next present entry.
  * @xas: XArray operation state.
--- a/lib/xarray.c~xarray-use-kmem_cache_alloc_lru-to-allocate-xa_node
+++ a/lib/xarray.c
@@ -302,7 +302,7 @@ bool xas_nomem(struct xa_state *xas, gfp
 	}
 	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
 		gfp |= __GFP_ACCOUNT;
-	xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+	xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 	if (!xas->xa_alloc)
 		return false;
 	xas->xa_alloc->parent = NULL;
@@ -334,10 +334,10 @@ static bool __xas_nomem(struct xa_state
 		gfp |= __GFP_ACCOUNT;
 	if (gfpflags_allow_blocking(gfp)) {
 		xas_unlock_type(xas, lock_type);
-		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 		xas_lock_type(xas, lock_type);
 	} else {
-		xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		xas->xa_alloc = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 	}
 	if (!xas->xa_alloc)
 		return false;
@@ -371,7 +371,7 @@ static void *xas_alloc(struct xa_state *
 	if (xas->xa->xa_flags & XA_FLAGS_ACCOUNT)
 		gfp |= __GFP_ACCOUNT;
-	node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+	node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 	if (!node) {
 		xas_set_err(xas, -ENOMEM);
 		return NULL;
 	}
@@ -1014,7 +1014,7 @@ void xas_split_alloc(struct xa_state *xa
 		void *sibling = NULL;
 		struct xa_node *node;
 
-		node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
+		node = kmem_cache_alloc_lru(radix_tree_node_cachep, xas->xa_lru, gfp);
 		if (!node)
 			goto nomem;
 		node->array = xas->xa;
--- a/mm/workingset.c~xarray-use-kmem_cache_alloc_lru-to-allocate-xa_node
+++ a/mm/workingset.c
@@ -429,7 +429,7 @@ out:
 	 * point where they would still be useful.
 	 */
 
-static struct list_lru shadow_nodes;
+struct list_lru shadow_nodes;
 
 void workingset_update_node(struct xa_node *node)
 {
_
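
For XArray users outside the page cache nothing changes: xa_lru defaults
to NULL, and kmem_cache_alloc_lru() with a NULL lru behaves like
kmem_cache_alloc().  A minimal sketch of using the new hook directly,
with a hypothetical caller-owned list_lru "my_lru" (assumed to have been
set up with list_lru_init_memcg()):

	static struct list_lru my_lru;

	static void my_store(struct xarray *xa, unsigned long index, void *entry)
	{
		XA_STATE(xas, xa, index);

		/* xa_nodes below are allocated via kmem_cache_alloc_lru(..., &my_lru, ...) */
		xas_set_lru(&xas, &my_lru);
		do {
			xas_lock(&xas);
			xas_store(&xas, entry);
			xas_unlock(&xas);
		} while (xas_nomem(&xas, GFP_KERNEL));
	}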

Patches currently in -mm which might be from songmuchun@xxxxxxxxxxxxx are

mm-list_lru-transpose-the-array-of-per-node-per-memcg-lru-lists.patch
mm-introduce-kmem_cache_alloc_lru.patch
fs-introduce-alloc_inode_sb-to-allocate-filesystems-specific-inode.patch
fs-allocate-inode-by-using-alloc_inode_sb.patch
f2fs-allocate-inode-by-using-alloc_inode_sb.patch
nfs42-use-a-specific-kmem_cache-to-allocate-nfs4_xattr_entry.patch
mm-dcache-use-kmem_cache_alloc_lru-to-allocate-dentry.patch
xarray-use-kmem_cache_alloc_lru-to-allocate-xa_node.patch
mm-memcontrol-move-memcg_online_kmem-to-mem_cgroup_css_online.patch
mm-list_lru-allocate-list_lru_one-only-when-needed.patch
mm-list_lru-rename-memcg_drain_all_list_lrus-to-memcg_reparent_list_lrus.patch
mm-list_lru-replace-linear-array-with-xarray.patch
mm-memcontrol-reuse-memory-cgroup-id-for-kmem-id.patch
mm-memcontrol-fix-cannot-alloc-the-maximum-memcg-id.patch
mm-list_lru-rename-list_lru_per_memcg-to-list_lru_memcg.patch
mm-memcontrol-rename-memcg_cache_id-to-memcg_kmem_id.patch
mm-thp-fix-wrong-cache-flush-in-remove_migration_pmd.patch
mm-fix-missing-cache-flush-for-all-tail-pages-of-compound-page.patch
mm-hugetlb-fix-missing-cache-flush-in-copy_huge_page_from_user.patch
mm-hugetlb-fix-missing-cache-flush-in-hugetlb_mcopy_atomic_pte.patch
mm-shmem-fix-missing-cache-flush-in-shmem_mfill_atomic_pte.patch
mm-userfaultfd-fix-missing-cache-flush-in-mcopy_atomic_pte-and-__mcopy_atomic.patch
mm-replace-multiple-dcache-flush-with-flush_dcache_folio.patch
mm-hugetlb-free-the-2nd-vmemmap-page-associated-with-each-hugetlb-page.patch
mm-hugetlb-replace-hugetlb_free_vmemmap_enabled-with-a-static_key.patch
mm-sparsemem-use-page-table-lock-to-protect-kernel-pmd-operations.patch
selftests-vm-add-a-hugetlb-test-case.patch
mm-sparsemem-move-vmemmap-related-to-hugetlb-to-config_hugetlb_page_free_vmemmap.patch