The patch titled
     Subject: mm/mglru: make memcg_lru->lock irq safe
has been added to the -mm mm-hotfixes-unstable branch.  Its filename is
     mm-mglru-make-memcg_lru-lock-irq-safe.patch

This patch will shortly appear at
     https://git.kernel.org/pub/scm/linux/kernel/git/akpm/25-new.git/tree/patches/mm-mglru-make-memcg_lru-lock-irq-safe.patch

This patch will later appear in the mm-hotfixes-unstable branch at
    git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next via the mm-everything
branch at git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
and is updated there every 2-3 working days

------------------------------------------------------
From: Yu Zhao <yuzhao@xxxxxxxxxx>
Subject: mm/mglru: make memcg_lru->lock irq safe
Date: Mon, 19 Jun 2023 13:38:21 -0600

lru_gen_rotate_memcg() can happen in softirq if memory.soft_limit_in_bytes
is set.  This requires memcg_lru->lock to be irq safe.  This problem only
affects memcg v1.

Link: https://lkml.kernel.org/r/20230619193821.2710944-1-yuzhao@xxxxxxxxxx
Fixes: e4dde56cd208 ("mm: multi-gen LRU: per-node lru_gen_folio lists")
Signed-off-by: Yu Zhao <yuzhao@xxxxxxxxxx>
Reported-by: syzbot+87c490fd2be656269b6a@xxxxxxxxxxxxxxxxxxxxxxxxx
Closes: https://syzkaller.appspot.com/bug?extid=87c490fd2be656269b6a
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 mm/vmscan.c |   13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

--- a/mm/vmscan.c~mm-mglru-make-memcg_lru-lock-irq-safe
+++ a/mm/vmscan.c
@@ -4743,10 +4743,11 @@ static void lru_gen_rotate_memcg(struct
 {
 	int seg;
 	int old, new;
+	unsigned long flags;
 	int bin = get_random_u32_below(MEMCG_NR_BINS);
 	struct pglist_data *pgdat = lruvec_pgdat(lruvec);
 
-	spin_lock(&pgdat->memcg_lru.lock);
+	spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);
 
 	VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
 
@@ -4781,7 +4782,7 @@ static void lru_gen_rotate_memcg(struct
 	if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
 		WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
 
-	spin_unlock(&pgdat->memcg_lru.lock);
+	spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
 }
 
 void lru_gen_online_memcg(struct mem_cgroup *memcg)
@@ -4794,7 +4795,7 @@ void lru_gen_online_memcg(struct mem_cgr
 		struct pglist_data *pgdat = NODE_DATA(nid);
 		struct lruvec *lruvec = get_lruvec(memcg, nid);
 
-		spin_lock(&pgdat->memcg_lru.lock);
+		spin_lock_irq(&pgdat->memcg_lru.lock);
 
 		VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));
 
@@ -4805,7 +4806,7 @@ void lru_gen_online_memcg(struct mem_cgr
 
 		lruvec->lrugen.gen = gen;
 
-		spin_unlock(&pgdat->memcg_lru.lock);
+		spin_unlock_irq(&pgdat->memcg_lru.lock);
 	}
 }
 
@@ -4829,7 +4830,7 @@ void lru_gen_release_memcg(struct mem_cg
 		struct pglist_data *pgdat = NODE_DATA(nid);
 		struct lruvec *lruvec = get_lruvec(memcg, nid);
 
-		spin_lock(&pgdat->memcg_lru.lock);
+		spin_lock_irq(&pgdat->memcg_lru.lock);
 
 		VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));
 
@@ -4841,7 +4842,7 @@ void lru_gen_release_memcg(struct mem_cg
 		if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
 			WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
 
-		spin_unlock(&pgdat->memcg_lru.lock);
+		spin_unlock_irq(&pgdat->memcg_lru.lock);
 	}
 }
_

Patches currently in -mm which might be from yuzhao@xxxxxxxxxx are

mm-mglru-make-memcg_lru-lock-irq-safe.patch
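
[Editor's note: for readers less familiar with the locking rules here, the
sketch below illustrates the pattern the fix adopts.  All names in it are
hypothetical and not part of the patch; only the locking pattern mirrors
the change above.  A spinlock that can be taken from softirq context must
be acquired with an interrupt-disabling variant in all contexts; otherwise
a softirq that fires while a process-context path on the same CPU holds
the lock will spin on a lock that CPU can never release.]

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/*
 * Reachable from softirq as well as process context, like
 * lru_gen_rotate_memcg().  The caller's irq state is unknown, so save
 * and restore it.  A plain spin_lock() here could self-deadlock if a
 * softirq interrupts a process-context holder of demo_lock.
 */
static void demo_rotate(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... update the shared structure ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/*
 * Only called from process context with irqs enabled, like
 * lru_gen_online_memcg() and lru_gen_release_memcg(), so the
 * unconditional _irq variant suffices.
 */
static void demo_online(void)
{
	spin_lock_irq(&demo_lock);
	/* ... */
	spin_unlock_irq(&demo_lock);
}

[Since the lock is only contended from process and softirq context,
disabling bottom halves (spin_lock_bh()) would also avoid this particular
deadlock; the patch uses the stronger irq-disabling variants.]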