From: Zeng Jingxiang <linuszeng@xxxxxxxxxxx>

In the memcg_list_lru_alloc() function below, mlru is almost always
NULL by the time it is freed, so in most cases checking it before the
kfree() saves a function call. Mark the NULL check as unlikely() to
optimize the code, and reuse the mlru for the next attempt when the
tree insertion fails, instead of freeing and reallocating it.

	do {
		xas_lock_irqsave(&xas, flags);
		if (!xas_load(&xas) && !css_is_dying(&pos->css)) {
			xas_store(&xas, mlru);
			if (!xas_error(&xas))
				mlru = NULL;
		}
		xas_unlock_irqrestore(&xas, flags);
	} while (xas_nomem(&xas, GFP_KERNEL));
>	if (mlru)
		kfree(mlru);

Reported-by: kernel test robot <lkp@xxxxxxxxx>
Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@xxxxxxxxx/
Signed-off-by: Zeng Jingxiang <linuszeng@xxxxxxxxxxx>
Suggested-by: Johannes Weiner <hannes@xxxxxxxxxxx>
---
 mm/list_lru.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7d69434c70e0..490473af3122 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 			 gfp_t gfp)
 {
 	unsigned long flags;
-	struct list_lru_memcg *mlru;
+	struct list_lru_memcg *mlru = NULL;
 	struct mem_cgroup *pos, *parent;
 	XA_STATE(xas, &lru->xa, 0);
 
@@ -535,9 +535,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 			parent = parent_mem_cgroup(pos);
 		}
 
-		mlru = memcg_init_list_lru_one(lru, gfp);
-		if (!mlru)
-			return -ENOMEM;
+		if (!mlru) {
+			mlru = memcg_init_list_lru_one(lru, gfp);
+			if (!mlru)
+				return -ENOMEM;
+		}
 		xas_set(&xas, pos->kmemcg_id);
 		do {
 			xas_lock_irqsave(&xas, flags);
@@ -548,10 +550,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
 			}
 			xas_unlock_irqrestore(&xas, flags);
 		} while (xas_nomem(&xas, gfp));
-		if (mlru)
-			kfree(mlru);
 	} while (pos != memcg && !css_is_dying(&pos->css));
 
+	if (unlikely(mlru))
+		kfree(mlru);
+
 	return xas_error(&xas);
 }
 #else
-- 
2.43.5
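
For readers unfamiliar with the pattern, below is a minimal userspace
sketch of the same allocate-once/reuse-on-failure scheme the patch
applies, with the rare leftover freed once after the loop. It is an
illustration only: try_insert(), struct node, and populate() are
hypothetical stand-ins, not the kernel's xarray API.

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Mirror the kernel's unlikely() branch hint for this sketch. */
	#define unlikely(x) __builtin_expect(!!(x), 0)

	struct node { int key; };

	#define NSLOTS 8
	static struct node *slots[NSLOTS];

	/*
	 * Succeeds only if the slot is still empty, like storing into an
	 * unoccupied xarray index; consumes the node on success.
	 */
	static bool try_insert(int key, struct node *n)
	{
		if (slots[key % NSLOTS])
			return false;
		slots[key % NSLOTS] = n;
		return true;
	}

	static int populate(const int *keys, int nr)
	{
		struct node *n = NULL;
		int i;

		for (i = 0; i < nr; i++) {
			/* Allocate only if the last attempt consumed n. */
			if (!n) {
				n = malloc(sizeof(*n));
				if (!n)
					return -1;
			}
			n->key = keys[i];
			if (try_insert(keys[i], n))
				n = NULL;	/* ownership transferred */
			/* else: keep n and reuse it for the next key */
		}
		/* Usually every node was consumed; free the rare leftover. */
		if (unlikely(n))
			free(n);
		return 0;
	}

	int main(void)
	{
		int keys[] = { 1, 9, 2, 3 };	/* 9 collides with 1 */

		if (populate(keys, 4))
			return 1;
		printf("populated\n");
		return 0;
	}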