The osq_lock() and osq_unlock() functions may not provide the necessary acquire and release barrier in some cases. This patch makes sure that the proper barriers are provided when osq_lock() is successful or when osq_unlock() is called. Suggested-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx> Signed-off-by: Waiman Long <Waiman.Long@xxxxxxx> --- kernel/locking/osq_lock.c | 24 ++++++++++++++++++------ 1 files changed, 18 insertions(+), 6 deletions(-) diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c index 05a3785..3da0b97 100644 --- a/kernel/locking/osq_lock.c +++ b/kernel/locking/osq_lock.c @@ -124,6 +124,11 @@ bool osq_lock(struct optimistic_spin_queue *lock) cpu_relax_lowlatency(); } + /* + * Add an acquire memory barrier for pairing with the release barrier + * in unlock. + */ + smp_acquire__after_ctrl_dep(); return true; unqueue: @@ -198,13 +203,20 @@ void osq_unlock(struct optimistic_spin_queue *lock) * Second most likely case. */ node = this_cpu_ptr(&osq_node); - next = xchg(&node->next, NULL); - if (next) { - WRITE_ONCE(next->locked, 1); + next = xchg_relaxed(&node->next, NULL); + if (next) + goto unlock; + + next = osq_wait_next(lock, node, NULL); + if (unlikely(!next)) { + /* + * In the unlikely event that the OSQ is empty, we need to + * provide a proper release barrier. + */ + smp_mb(); return; } - next = osq_wait_next(lock, node, NULL); - if (next) - WRITE_ONCE(next->locked, 1); +unlock: + smp_store_release(&next->locked, 1); } -- 1.7.1 -- To unsubscribe from this list: send the line "unsubscribe linux-arch" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html