On 12/06/2016 10:06 AM, Peter Zijlstra wrote:
> On Thu, Dec 01, 2016 at 03:06:45PM +0100, Nicolai Hähnle wrote:
>> +++ b/kernel/locking/mutex.c
>> @@ -350,7 +350,8 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock,
>>   * access and not reliable.
>>   */
>>  static noinline
>> -bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
>> +bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
>> +			 bool use_ww_ctx, struct ww_acquire_ctx *ww_ctx)
>>  {
>>  	bool ret = true;
>>
>> @@ -373,6 +374,28 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
>>  			break;
>>  		}
>>
>> +		if (use_ww_ctx && ww_ctx->acquired > 0) {
>> +			struct ww_mutex *ww;
>> +
>> +			ww = container_of(lock, struct ww_mutex, base);
>> +
>> +			/*
>> +			 * If ww->ctx is set the contents are undefined, only
>> +			 * by acquiring wait_lock there is a guarantee that
>> +			 * they are not invalid when reading.
>> +			 *
>> +			 * As such, when deadlock detection needs to be
>> +			 * performed the optimistic spinning cannot be done.
>> +			 *
>> +			 * Check this in every inner iteration because we may
>> +			 * be racing against another thread's ww_mutex_lock.
>> +			 */
>> +			if (READ_ONCE(ww->ctx)) {
>> +				ret = false;
>> +				break;
>> +			}
>> +		}
>> +
>>  		cpu_relax();
>>  	}
>>  	rcu_read_unlock();
> Aside from the valid question about mutex_can_spin_on_owner() there's
> another 'problem' here, mutex_spin_on_owner() is marked noinline, so all
> the use_ww_ctx stuff doesn't 'work' here.

The mutex_spin_on_owner() function was originally marked noinline
because it could be a major consumer of CPU cycles in a contended lock.
Having it shown separately in the perf output helps users better
understand what is consuming all the CPU cycles. So I would still like
to keep it this way.

If you are concerned about additional latency for non-ww_mutex calls,
one alternative would be:

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 0afa998..777338d 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -349,9 +349,9 @@ static __always_inline void ww_mutex_lock_acquired(struct ww
  * Look out! "owner" is an entirely speculative pointer
  * access and not reliable.
  */
-static noinline
-bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
-			 bool use_ww_ctx, struct ww_acquire_ctx *ww_ctx)
+static __always_inline
+bool __mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
+			   const bool use_ww_ctx, struct ww_acquire_ctx *ww_ctx)
 {
 	bool ret = true;

@@ -403,6 +403,19 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_st
 	return ret;
 }

+static noinline
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+{
+	return __mutex_spin_on_owner(lock, owner, false, NULL);
+}
+
+static noinline
+bool ww_mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
+			    struct ww_acquire_ctx *ww_ctx)
+{
+	return __mutex_spin_on_owner(lock, owner, true, ww_ctx);
+}
+
 /*
  * Initial check for entering the mutex spinning loop
  */
@@ -489,13 +502,17 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 		 */
 		owner = __mutex_owner(lock);
 		if (owner) {
+			bool spin_ok;
+
 			if (waiter && owner == task) {
 				smp_mb(); /* ACQUIRE */
 				break;
 			}

-			if (!mutex_spin_on_owner(lock, owner, use_ww_ctx,
-						 ww_ctx))
+			spin_ok = use_ww_ctx
+				? ww_mutex_spin_on_owner(lock, owner, ww_ctx)
+				: mutex_spin_on_owner(lock, owner);
+			if (!spin_ok)
 				goto fail_unlock;
 		}
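
For what it's worth, here is a minimal standalone sketch (plain userspace
C, not kernel code) of the pattern the diff above relies on: a single
__always_inline body takes a compile-time-constant flag, and two noinline
wrappers pass true/false, so the compiler can drop the ww-specific branch
from the non-ww wrapper while both wrappers still show up as separate
symbols in perf. All names here (spin_body, spin_plain, spin_ww,
ww_acquire_ctx_stub) are illustrative only, not taken from mutex.c:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct ww_acquire_ctx_stub { int acquired; };

/* Always-inlined worker; use_ww_ctx is a constant at every call site. */
static inline __attribute__((always_inline))
bool spin_body(int owner_running, const bool use_ww_ctx,
	       struct ww_acquire_ctx_stub *ww_ctx)
{
	/* Generic part of the spin decision. */
	if (!owner_running)
		return false;

	/*
	 * Because use_ww_ctx is a compile-time constant in each wrapper,
	 * this whole block is dead code in the non-ww variant.
	 */
	if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0)
		return false;	/* back off for deadlock avoidance */

	return true;
}

/* Separate noinline entry points, one per caller type. */
static __attribute__((noinline))
bool spin_plain(int owner_running)
{
	return spin_body(owner_running, false, NULL);
}

static __attribute__((noinline))
bool spin_ww(int owner_running, struct ww_acquire_ctx_stub *ww_ctx)
{
	return spin_body(owner_running, true, ww_ctx);
}

int main(void)
{
	struct ww_acquire_ctx_stub ctx = { .acquired = 1 };

	printf("plain: %d\n", spin_plain(1));	/* 1: keep spinning */
	printf("ww:    %d\n", spin_ww(1, &ctx));	/* 0: back off */
	return 0;
}

Built with gcc -O2, the plain wrapper's object code should contain no
trace of the ctx check at all, which is the point of keeping use_ww_ctx
a constant at each call site instead of a runtime argument to a noinline
function.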