On Mon, Jan 21, 2019 at 10:12:34AM +0100, Peter Zijlstra wrote:
> diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
> index 8a8c3c208c5e..983b49a75826 100644
> --- a/kernel/locking/qspinlock.c
> +++ b/kernel/locking/qspinlock.c
> @@ -412,6 +412,12 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
> 	idx = node->count++;
> 	tail = encode_tail(smp_processor_id(), idx);
>
> +	if (idx >= MAX_NODES) {
> +		while (!queued_spin_trylock(lock))
> +			cpu_relax();
> +		goto release;
> +	}
> +
> 	node = grab_mcs_node(node, idx);

With an unlikely() and a comment, I /much/ prefer this approach!

Will