[RFC PATCH 53/86] sched: fixup __cond_resched_*()

Remove the call to _cond_resched(). Rescheduling happens implicitly
when we drop the lock.
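
For illustration (not part of the patch): under CONFIG_PREEMPTION,
spin_unlock() ends in preempt_enable(), which itself reschedules when
TIF_NEED_RESCHED is set; that is the implicit reschedule relied on
here. A minimal userspace sketch of that control flow, using stub
functions rather than the real kernel APIs:

  /* Userspace model only; the real paths live in
   * include/linux/preempt.h and kernel/sched/core.c.
   */
  #include <stdbool.h>
  #include <stdio.h>

  static bool need_resched_flag;          /* models TIF_NEED_RESCHED */

  static void schedule_stub(void)         /* models schedule() */
  {
          need_resched_flag = false;
          printf("schedule() called\n");
  }

  static void preempt_enable_stub(void)   /* models preempt_enable() */
  {
          /* Re-enabling preemption checks need_resched and
           * reschedules; this is the implicit schedule the
           * commit message relies on.
           */
          if (need_resched_flag)
                  schedule_stub();
  }

  static void spin_unlock_stub(void)      /* models spin_unlock() */
  {
          /* ...release the lock word, then... */
          preempt_enable_stub();          /* implicit reschedule point */
  }

  int main(void)
  {
          need_resched_flag = true;
          spin_unlock_stub();             /* prints "schedule() called" */
          return 0;
  }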

Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
 kernel/sched/core.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 15db5fb7acc7..e1b0759ed3ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8595,12 +8595,8 @@ EXPORT_SYMBOL(_cond_resched);
 #endif
 
 /*
- * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
- * call schedule, and on return reacquire the lock.
- *
- * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
- * operations here to prevent schedule() from being called twice (once via
- * spin_unlock(), once by hand).
+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock
+ * (implicitly calling schedule), and reacquire the lock.
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
@@ -8611,7 +8607,7 @@ int __cond_resched_lock(spinlock_t *lock)
 
 	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		if (!_cond_resched())
+		if (!resched)
 			cpu_relax();
 		ret = 1;
 		spin_lock(lock);
@@ -8629,7 +8625,7 @@ int __cond_resched_rwlock_read(rwlock_t *lock)
 
 	if (rwlock_needbreak(lock) || resched) {
 		read_unlock(lock);
-		if (!_cond_resched())
+		if (!resched)
 			cpu_relax();
 		ret = 1;
 		read_lock(lock);
@@ -8647,7 +8643,7 @@ int __cond_resched_rwlock_write(rwlock_t *lock)
 
 	if (rwlock_needbreak(lock) || resched) {
 		write_unlock(lock);
-		if (!_cond_resched())
+		if (!resched)
 			cpu_relax();
 		ret = 1;
 		write_lock(lock);
-- 
2.31.1




