Contention awareness while holding a spin lock is essential for reducing
latency when long running kernel operations can hold that lock. Add the
same contention detection interface for read/write spin locks.

Signed-off-by: Ben Gardon <bgardon@xxxxxxxxxx>
---
 include/linux/sched.h | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 063cd120b4593..77179160ec3ab 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1870,6 +1870,23 @@ static inline int spin_needbreak(spinlock_t *lock)
 #endif
 }
 
+/*
+ * Check if a rwlock is contended.
+ * Returns non-zero if there is another task waiting on the rwlock.
+ * Returns zero if the lock is not contended or the system / underlying
+ * rwlock implementation does not support contention detection.
+ * Technically does not depend on CONFIG_PREEMPTION, but a general need
+ * for low latency.
+ */
+static inline int rwlock_needbreak(rwlock_t *lock)
+{
+#ifdef CONFIG_PREEMPTION
+	return rwlock_is_contended(lock);
+#else
+	return 0;
+#endif
+}
+
 static __always_inline bool need_resched(void)
 {
 	return unlikely(tif_need_resched());
-- 
2.29.0.rc2.309.g374f81d7ae-goog
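
Not part of the patch, just a usage sketch of the new interface: a writer
that holds a rwlock across a long-running loop can poll rwlock_needbreak()
and briefly drop the lock when another task is spinning on it. The lock,
the item array, and process_one_item() below are hypothetical names for
illustration only; any state guarded by the lock must be revalidated after
reacquiring it.

static DEFINE_RWLOCK(example_lock);

static void process_items(struct item *items, int nr_items)
{
	int i;

	write_lock(&example_lock);
	for (i = 0; i < nr_items; i++) {
		process_one_item(&items[i]);

		/*
		 * If another task is spinning on example_lock, or this
		 * task should reschedule, drop the lock briefly so the
		 * waiter can make progress instead of burning CPU.
		 */
		if (need_resched() || rwlock_needbreak(&example_lock)) {
			write_unlock(&example_lock);
			cond_resched();
			write_lock(&example_lock);
			/* Revalidate any state guarded by the lock here. */
		}
	}
	write_unlock(&example_lock);
}

If the related cond_resched_rwlock_read()/cond_resched_rwlock_write()
helpers are available, they wrap this drop-and-reacquire sequence; the
open-coded version above only shows where rwlock_needbreak() fits,
mirroring the existing spin_needbreak() pattern.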