Now that we don't have any users of cond_resched() in the tree, we can
finally remove it.

Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Juri Lelli <juri.lelli@xxxxxxxxxx>
Cc: Vincent Guittot <vincent.guittot@xxxxxxxxxx>
Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
 include/linux/sched.h | 16 ++++------------
 kernel/sched/core.c   | 13 -------------
 2 files changed, 4 insertions(+), 25 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index bae6eed534dd..bbb981c1a142 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2083,19 +2083,11 @@ static inline bool test_tsk_need_resched_any(struct task_struct *tsk)
 }
 
 /*
- * cond_resched() and cond_resched_lock(): latency reduction via
- * explicit rescheduling in places that are safe. The return
- * value indicates whether a reschedule was done in fact.
- * cond_resched_lock() will drop the spinlock before scheduling,
+ * cond_resched_lock(): latency reduction via explicit rescheduling
+ * in places that are safe. The return value indicates whether a
+ * reschedule was done in fact. cond_resched_lock() will drop the
+ * spinlock before scheduling.
  */
-#ifdef CONFIG_PREEMPTION
-static inline int _cond_resched(void) { return 0; }
-#endif
-
-#define cond_resched() ({			\
-	__might_resched(__FILE__, __LINE__, 0);	\
-	_cond_resched();			\
-})
 
 extern int __cond_resched_lock(spinlock_t *lock);
 extern int __cond_resched_rwlock_read(rwlock_t *lock);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 691b50791e04..6940893e3930 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8580,19 +8580,6 @@ SYSCALL_DEFINE0(sched_yield)
 	return 0;
 }
 
-#ifndef CONFIG_PREEMPTION
-int __sched _cond_resched(void)
-{
-	if (should_resched(0)) {
-		preempt_schedule_common();
-		return 1;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(_cond_resched);
-#endif
-
 /*
  * __cond_resched_lock() - if a reschedule is pending, drop the given lock
  * (implicitly calling schedule), and reacquire the lock.
-- 
2.31.1
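
As a note for reviewers: cond_resched_lock() and its rwlock variants
survive this patch, per the reworded comment above. A minimal sketch of
the pattern that comment describes follows; scan_table(), struct entry,
and table_lock are hypothetical names made up for illustration, not code
from this series.

	#include <linux/spinlock.h>
	#include <linux/sched.h>

	struct entry;				/* hypothetical payload */

	static DEFINE_SPINLOCK(table_lock);	/* hypothetical lock */

	static void scan_table(struct entry **table, unsigned long nr)
	{
		unsigned long i;

		spin_lock(&table_lock);
		for (i = 0; i < nr; i++) {
			/* ... examine table[i] under table_lock ... */

			/*
			 * If a reschedule is pending, drop table_lock,
			 * schedule, and reacquire it. The nonzero return
			 * value signals that the lock was dropped, so any
			 * state cached while holding it may be stale.
			 */
			if (cond_resched_lock(&table_lock))
				continue;
		}
		spin_unlock(&table_lock);
	}

Indexing by position rather than walking a list is deliberate in this
sketch: once the lock has been dropped, list cursors can be invalidated,
whereas an index into a stable table remains usable after reacquisition.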