Use the break function allowed by the new osq_lock() to enable early
break from the OSQ when a timeout value is specified and the expiration
time has been reached.

Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
---
 kernel/locking/rwsem.c | 35 +++++++++++++++++++++++++++++++----
 1 file changed, 31 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index c15926ecb21e..78708097162a 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -794,23 +794,50 @@ static inline u64 rwsem_rspin_threshold(struct rw_semaphore *sem)
 	return sched_clock() + delta;
 }
 
+struct rwsem_break_arg {
+	u64 timeout;
+	int loopcnt;
+};
+
+static bool rwsem_osq_break(void *brk_arg)
+{
+	struct rwsem_break_arg *arg = brk_arg;
+
+	arg->loopcnt++;
+	/*
+	 * Check sched_clock() only once every 256 iterations.
+	 */
+	if (!(arg->loopcnt & 0xff) && (sched_clock() >= arg->timeout))
+		return true;
+	return false;
+}
+
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem, bool wlock,
 				  ktime_t timeout)
 {
-	bool taken = false;
+	bool taken = false, locked;
 	int prev_owner_state = OWNER_NULL;
 	int loop = 0;
 	u64 rspin_threshold = 0, curtime;
+	struct rwsem_break_arg break_arg;
 	unsigned long nonspinnable = wlock ? RWSEM_WR_NONSPINNABLE
 					   : RWSEM_RD_NONSPINNABLE;
 
 	preempt_disable();
 
 	/* sem->wait_lock should not be held when doing optimistic spinning */
-	if (!osq_lock(&sem->osq, NULL, NULL))
-		goto done;
+	if (timeout) {
+		break_arg.timeout = ktime_to_ns(timeout);
+		break_arg.loopcnt = 0;
+		locked = osq_lock(&sem->osq, rwsem_osq_break, &break_arg);
+		curtime = sched_clock();
+	} else {
+		locked = osq_lock(&sem->osq, NULL, NULL);
+		curtime = 0;
+	}
 
-	curtime = timeout ? sched_clock() : 0;
+	if (!locked)
+		goto done;
 
 	/*
 	 * Optimistically spin on the owner field and attempt to acquire the
-- 
2.18.1
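
For readers who want to see the control flow in isolation, below is a minimal
userspace sketch of the break-callback pattern the patch relies on: a spin
loop polls an optional break function, and the callback rate-limits its clock
reads to one in every 256 iterations, as rwsem_osq_break() does. The names
(spin_with_break, break_on_timeout, struct break_arg) and the use of
clock_gettime() in place of sched_clock() are illustrative assumptions for
this sketch, not kernel APIs.

/*
 * Userspace sketch of the break-callback pattern; names are illustrative
 * only and are not part of the kernel API.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct break_arg {
	uint64_t timeout;	/* absolute deadline in nanoseconds */
	int loopcnt;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Mirrors rwsem_osq_break(): read the clock only once every 256 calls. */
static bool break_on_timeout(void *brk_arg)
{
	struct break_arg *arg = brk_arg;

	arg->loopcnt++;
	if (!(arg->loopcnt & 0xff) && now_ns() >= arg->timeout)
		return true;
	return false;
}

/* Spin until *flag becomes true, or until the break function says to stop. */
static bool spin_with_break(volatile bool *flag,
			    bool (*brk_fn)(void *), void *brk_arg)
{
	while (!*flag) {
		if (brk_fn && brk_fn(brk_arg))
			return false;	/* gave up: deadline reached */
	}
	return true;
}

int main(void)
{
	volatile bool flag = false;	/* never set: force the timeout path */
	struct break_arg arg = {
		.timeout = now_ns() + 10000000ULL,	/* 10 ms from now */
		.loopcnt = 0,
	};

	printf("acquired: %d\n", spin_with_break(&flag, break_on_timeout, &arg));
	return 0;
}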