On Tue, Jun 14, 2022 at 06:03:59PM -0400, Waiman Long wrote:
> diff --git a/mm/kmemleak.c b/mm/kmemleak.c
> index 7dd64139a7c7..abba063ae5ee 100644
> --- a/mm/kmemleak.c
> +++ b/mm/kmemleak.c
> @@ -1417,12 +1417,16 @@ static void kmemleak_scan(void)
>  	struct zone *zone;
>  	int __maybe_unused i;
>  	int new_leaks = 0;
> +	int loop1_cnt = 0;
>  
>  	jiffies_last_scan = jiffies;
>  
>  	/* prepare the kmemleak_object's */
>  	rcu_read_lock();
>  	list_for_each_entry_rcu(object, &object_list, object_list) {
> +		bool obj_pinned = false;
> +
> +		loop1_cnt++;
>  		raw_spin_lock_irq(&object->lock);
>  #ifdef DEBUG
>  		/*
> @@ -1437,10 +1441,32 @@ static void kmemleak_scan(void)
>  #endif
>  		/* reset the reference count (whiten the object) */
>  		object->count = 0;
> -		if (color_gray(object) && get_object(object))
> +		if (color_gray(object) && get_object(object)) {
>  			list_add_tail(&object->gray_list, &gray_list);
> +			obj_pinned = true;
> +		}
>  
>  		raw_spin_unlock_irq(&object->lock);
> +
> +		/*
> +		 * Do a cond_resched() to avoid soft lockup every 64k objects.
> +		 * Make sure a reference has been taken so that the object
> +		 * won't go away without RCU read lock.
> +		 */
> +		if (!(loop1_cnt & 0xffff)) {
> +			if (!obj_pinned && !get_object(object)) {
> +				/* Try the next object instead */
> +				loop1_cnt--;
> +				continue;
> +			}

With this trick we could probably get rid of rcu_read_lock() and take
the kmemleak_lock instead. But that's for another patch.

> +
> +			rcu_read_unlock();
> +			cond_resched();
> +			rcu_read_lock();

cond_resched_rcu() to save a couple of lines?

> +
> +			if (!obj_pinned)
> +				put_object(object);
> +		}

Reviewed-by: Catalin Marinas <catalin.marinas@xxxxxxx>

Thanks.
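
[As a rough illustration of the cond_resched_rcu() suggestion above, the
quoted hunk could be condensed along the lines below. This is an untested
sketch, not the applied code: cond_resched_rcu() is defined in
include/linux/sched.h and expands to the same
rcu_read_unlock()/cond_resched()/rcu_read_lock() sequence, or to nothing
when CONFIG_PREEMPT_RCU is enabled and CONFIG_DEBUG_ATOMIC_SLEEP is not,
since preemptible RCU read-side critical sections already allow
rescheduling.]

	if (!(loop1_cnt & 0xffff)) {
		if (!obj_pinned && !get_object(object)) {
			/* Try the next object instead */
			loop1_cnt--;
			continue;
		}

		/*
		 * Drop the RCU read lock, reschedule if needed and
		 * re-acquire it. The reference taken above keeps the
		 * current object alive across the unlocked window.
		 */
		cond_resched_rcu();

		if (!obj_pinned)
			put_object(object);
	}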