On Tue, Nov 07, 2023 at 01:57:21PM -0800, Ankur Arora wrote: > diff --git a/include/linux/sched.h b/include/linux/sched.h > index 95d47783ff6e..5f0d7341cb88 100644 > --- a/include/linux/sched.h > +++ b/include/linux/sched.h > @@ -2172,9 +2172,11 @@ static inline int rwlock_needbreak(rwlock_t *lock) > > static __always_inline bool need_resched(void) > { > - return unlikely(tif_need_resched()); > + return unlikely(tif_need_resched(RESCHED_eager) || > + tif_need_resched(RESCHED_lazy)); > } > > + We really needed this extra blank line, yes? :-) > /* > * Wrappers for p->thread_info->cpu access. No-op on UP. > */ > diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h > index 478084f9105e..719416fe8ddc 100644 > --- a/include/linux/sched/idle.h > +++ b/include/linux/sched/idle.h > @@ -63,7 +63,7 @@ static __always_inline bool __must_check current_set_polling_and_test(void) > */ > smp_mb__after_atomic(); > > - return unlikely(tif_need_resched()); > + return unlikely(need_resched()); > } You're stacking unlikely()s here; need_resched() already wraps its return value in unlikely().