This patch modifies rwsem_spin_on_owner() to return a tri-state value to better reflect the state of the lock holder which enables us to make a better decision of what to do next. Signed-off-by: Waiman Long <Waiman.Long@xxxxxxx> --- kernel/locking/rwsem-xadd.c | 14 +++++++++----- 1 files changed, 9 insertions(+), 5 deletions(-) diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c index 9309e72..31cb286 100644 --- a/kernel/locking/rwsem-xadd.c +++ b/kernel/locking/rwsem-xadd.c @@ -352,9 +352,13 @@ done: } /* - * Return true only if we can still spin on the owner field of the rwsem. + * Return the following three values depending on the lock owner state. + * 1 when owner has changed and no reader is detected yet. + * 0 when owner has changed and/or owner is a reader. + * -1 when optimistic spinning has to stop because either the owner stops + * running or its timeslice has been used up. */ -static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem) +static noinline int rwsem_spin_on_owner(struct rw_semaphore *sem) { struct task_struct *owner = READ_ONCE(sem->owner); @@ -374,7 +378,7 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem) /* abort spinning when need_resched or owner is not running */ if (!owner->on_cpu || need_resched()) { rcu_read_unlock(); - return false; + return -1; } cpu_relax_lowlatency(); @@ -385,7 +389,7 @@ out: * If there is a new owner or the owner is not set, we continue * spinning. */ - return !rwsem_owner_is_reader(READ_ONCE(sem->owner)); + return rwsem_owner_is_reader(READ_ONCE(sem->owner)) ? 0 : 1; } static bool rwsem_optimistic_spin(struct rw_semaphore *sem) @@ -408,7 +412,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem) * 2) readers own the lock as we can't determine if they are * actively running or not. 
*/ - while (rwsem_spin_on_owner(sem)) { + while (rwsem_spin_on_owner(sem) > 0) { /* * Try to acquire the lock */ -- 1.7.1 -- To unsubscribe from this list: send the line "unsubscribe linux-s390" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html