Re: [PATCH 3/3] sched: Add cond_resched_rwlock

On 10/27/20 12:49 PM, Ben Gardon wrote:
Rescheduling while holding a spin lock is essential for keeping long-running
kernel operations running smoothly. Add the facility to cond_resched
rwlocks.

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
  include/linux/sched.h | 12 ++++++++++++
  kernel/sched/core.c   | 40 ++++++++++++++++++++++++++++++++++++++++
  2 files changed, 52 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77179160ec3ab..2eb0c53fce115 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1841,12 +1841,24 @@ static inline int _cond_resched(void) { return 0; }
 })
 
 extern int __cond_resched_lock(spinlock_t *lock);
+extern int __cond_resched_rwlock_read(rwlock_t *lock);
+extern int __cond_resched_rwlock_write(rwlock_t *lock);
 
 #define cond_resched_lock(lock) ({				\
 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
 	__cond_resched_lock(lock);				\
 })
 
+#define cond_resched_rwlock_read(lock) ({			\
+	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	__cond_resched_rwlock_read(lock);			\
+})
+
+#define cond_resched_rwlock_write(lock) ({			\
+	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
+	__cond_resched_rwlock_write(lock);			\
+})
+
 static inline void cond_resched_rcu(void)
 {
 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d2003a7d5ab55..ac58e7829a063 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6152,6 +6152,46 @@ int __cond_resched_lock(spinlock_t *lock)
 }
 EXPORT_SYMBOL(__cond_resched_lock);
 
+int __cond_resched_rwlock_read(rwlock_t *lock)
+{
+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
+	int ret = 0;
+
+	lockdep_assert_held(lock);
+
+	if (rwlock_needbreak(lock) || resched) {
+		read_unlock(lock);
+		if (resched)
+			preempt_schedule_common();
+		else
+			cpu_relax();
+		ret = 1;
+		read_lock(lock);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(__cond_resched_rwlock_read);
+
+int __cond_resched_rwlock_write(rwlock_t *lock)
+{
+	int resched = should_resched(PREEMPT_LOCK_OFFSET);
+	int ret = 0;
+
+	lockdep_assert_held(lock);
+
+	if (rwlock_needbreak(lock) || resched) {
+		write_unlock(lock);
+		if (resched)
+			preempt_schedule_common();
+		else
+			cpu_relax();
+		ret = 1;
+		write_lock(lock);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(__cond_resched_rwlock_write);
+
 /**
  * yield - yield the current processor to other threads.
  *
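
As a usage sketch (a hypothetical caller, not part of this patch or series):
a long-running walk under a write-held rwlock would call the new helper once
per iteration. The struct and helper names below (big_table, process_entry)
are made up for illustration; only cond_resched_rwlock_write() comes from the
patch above.

struct entry;				/* hypothetical payload type */
void process_entry(struct entry *e);	/* hypothetical per-entry work */

struct big_table {
	rwlock_t lock;
	size_t nr_entries;
	struct entry *entries;
};

static void walk_big_table(struct big_table *t)
{
	size_t i;

	write_lock(&t->lock);
	for (i = 0; i < t->nr_entries; i++) {
		process_entry(&t->entries[i]);

		/*
		 * Drop and retake t->lock if rescheduling is due or another
		 * CPU is spinning on the lock.  A nonzero return means the
		 * lock was dropped, so any state cached while it was held
		 * would need to be revalidated at this point.
		 */
		cond_resched_rwlock_write(&t->lock);
	}
	write_unlock(&t->lock);
}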

Other than the lockdep_assert_held() changes spotted by others, this patch looks good to me.

Acked-by: Waiman Long <longman@xxxxxxxxxx>
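
For reference on the lockdep_assert_held() point above, a sketch of the
presumably intended change (an assumption based on the comment, not taken
verbatim from this thread): assert the mode the lock is held in, using the
existing lockdep_assert_held_read()/lockdep_assert_held_write() helpers. The
read-side function would then look like:

int __cond_resched_rwlock_read(rwlock_t *lock)
{
	int resched = should_resched(PREEMPT_LOCK_OFFSET);
	int ret = 0;

	/* Assert the lock is held for read, not merely held in some mode. */
	lockdep_assert_held_read(lock);

	if (rwlock_needbreak(lock) || resched) {
		read_unlock(lock);
		if (resched)
			preempt_schedule_common();
		else
			cpu_relax();
		ret = 1;
		read_lock(lock);
	}
	return ret;
}

and __cond_resched_rwlock_write() would use lockdep_assert_held_write() in
the same spot.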



