On Mon, Feb 28, 2022 at 05:04:10PM -0800, Namhyung Kim wrote:

> @@ -80,7 +81,9 @@ static inline void queued_read_lock(struct qrwlock *lock)
>  		return;
>  
>  	/* The slowpath will decrement the reader count, if necessary. */
> +	LOCK_CONTENTION_BEGIN(lock, LCB_F_READ);
>  	queued_read_lock_slowpath(lock);
> +	LOCK_CONTENTION_END(lock);
>  }
>  
>  /**
> @@ -94,7 +97,9 @@ static inline void queued_write_lock(struct qrwlock *lock)
>  	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
>  		return;
>  
> +	LOCK_CONTENTION_BEGIN(lock, LCB_F_WRITE);
>  	queued_write_lock_slowpath(lock);
> +	LOCK_CONTENTION_END(lock);
>  }

> @@ -82,7 +83,9 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
>  	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
>  		return;
>  
> +	LOCK_CONTENTION_BEGIN(lock, 0);
>  	queued_spin_lock_slowpath(lock, val);
> +	LOCK_CONTENTION_END(lock);
>  }

Can you please stick that _inside_ the slowpath? You really don't want
to inline that.
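
IOW, something like the stand-alone sketch below (untested, not actual
kernel code): keep the inline fast path a bare cmpxchg and emit the
hooks exactly once, inside the out-of-line slowpath, instead of
inlining two calls at every lock site. LOCK_CONTENTION_BEGIN/END here
are stub stand-ins modeled on the macros in the quoted patch, and the
spin loop is a trivial stand-in for the real queued-spinlock slowpath:

/*
 * Stand-alone illustration, not kernel code. The stubs below model
 * the LOCK_CONTENTION_*() hooks from the quoted patch; the spin loop
 * is a trivial stand-in for the real slowpath body.
 */
#include <stdatomic.h>
#include <stdio.h>

struct qspinlock {
	atomic_int val;
};

/* Stub hooks standing in for the patch's LOCK_CONTENTION_*() macros. */
static void LOCK_CONTENTION_BEGIN(struct qspinlock *lock, int flags)
{
	printf("contention begin on %p (flags=%d)\n", (void *)lock, flags);
}

static void LOCK_CONTENTION_END(struct qspinlock *lock)
{
	printf("contention end on %p\n", (void *)lock);
}

/*
 * Out-of-line slowpath: the hooks live here, so they exist once in
 * the binary instead of being inlined at every lock call site.
 */
static void queued_spin_lock_slowpath(struct qspinlock *lock, int val)
{
	LOCK_CONTENTION_BEGIN(lock, 0);
	do {
		val = 0;
	} while (!atomic_compare_exchange_weak(&lock->val, &val, 1));
	LOCK_CONTENTION_END(lock);
}

/* Inlined fast path: identical to the uninstrumented version. */
static inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	if (atomic_compare_exchange_strong(&lock->val, &val, 1))
		return;

	queued_spin_lock_slowpath(lock, val);
}

int main(void)
{
	struct qspinlock lock = { .val = 0 };

	queued_spin_lock(&lock);	/* uncontended: fast path, no output */
	atomic_store(&lock.val, 0);	/* unlock */
	/* Call the slowpath directly just to show the hooks firing. */
	queued_spin_lock_slowpath(&lock, 0);
	return 0;
}

Structured like that, the instrumented fast path compiles to the same
code as the uninstrumented one; only contended acquisitions pay for
the hooks.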