On Mon, Jul 08, 2024 at 01:39:57PM +0000, Adrian Huang12 wrote: > Hi, > > > Could you please test it: > > > > <snip> > > diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 03b82fb8ecd3..6dc204b8495a > > 100644 > > --- a/mm/vmalloc.c > > +++ b/mm/vmalloc.c > > @@ -2190,10 +2190,12 @@ static void purge_vmap_node(struct work_struct > > *work) { > > struct vmap_node *vn = container_of(work, > > struct vmap_node, purge_work); > > + unsigned long resched_threshold; > > struct vmap_area *va, *n_va; > > LIST_HEAD(local_list); > > > > vn->nr_purged = 0; > > + resched_threshold = lazy_max_pages() << 1; > > > > list_for_each_entry_safe(va, n_va, &vn->purge_list, list) { > > unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; @@ > > -2210,6 +2212,9 @@ static void purge_vmap_node(struct work_struct *work) > > atomic_long_sub(nr, &vmap_lazy_nr); > > vn->nr_purged++; > > > > + if (atomic_long_read(&vmap_lazy_nr) < resched_threshold) > > + cond_resched(); > > + > > if (is_vn_id_valid(vn_id) && !vn->skip_populate) > > if (node_pool_add_va(vn, va)) > > continue; > > <snip> > > This patch can fix the issue. Feel free to add my tested-by. > Tested-by: Adrian Huang <ahuang12@xxxxxxxxxx> > Thank you. I will add your Tested-by! -- Uladzislau Rezki