Re: [PATCH 2/2] locking: Apply contention tracepoints in the slow path

On Tue, Mar 22, 2022 at 11:57:09AM -0700, Namhyung Kim wrote:
> diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
> index ee2fd7614a93..c88deda77cf2 100644
> --- a/kernel/locking/mutex.c
> +++ b/kernel/locking/mutex.c
> @@ -644,6 +644,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
>  	}
>  
>  	set_current_state(state);
> +	trace_contention_begin(lock, 0);
>  	for (;;) {
>  		bool first;
>  
> @@ -710,6 +711,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
>  skip_wait:
>  	/* got the lock - cleanup and rejoice! */
>  	lock_acquired(&lock->dep_map, ip);
> +	trace_contention_end(lock, 0);
>  
>  	if (ww_ctx)
>  		ww_mutex_lock_acquired(ww, ww_ctx);

(note: it's possible to get to this trace_contention_end() without ever
having passed a _begin -- fixed in the below)
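
For reference, the offending path, paraphrased from __mutex_lock_common()
with your patch applied -- the trylock under wait_lock jumps straight to
skip_wait, past the _begin:

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;		/* ends up at trace_contention_end() ... */
	}
	...
	set_current_state(state);
	trace_contention_begin(lock, 0);	/* ... without ever running this */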

> @@ -721,6 +723,7 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
>  err:
>  	__set_current_state(TASK_RUNNING);
>  	__mutex_remove_waiter(lock, &waiter);
> +	trace_contention_end(lock, ret);
>  err_early_kill:
>  	raw_spin_unlock(&lock->wait_lock);
>  	debug_mutex_free_waiter(&waiter);


So there is one thing here that may or may not be important, but is
handled somewhat inconsistently across the patch. That is, do you want
to include optimistic spinning in the contention time or not?

Because currently you include it only sometimes.
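
Specifically, paraphrasing the two mutex_optimistic_spin() call sites
with your patch applied -- the mid-path spin runs before your _begin,
while the spin a first-in-line waiter does runs after it:

	/* spinning before joining the wait list -- not counted */
	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		...
	}
	...
	set_current_state(state);
	trace_contention_begin(lock, 0);
	for (;;) {
		...
		/* first waiter spinning again -- counted */
		if (__mutex_trylock_or_handoff(lock, first) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;
		...
	}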

Also, if you were to add LCB_F_MUTEX, then you could have something like:


--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -602,12 +602,14 @@ __mutex_lock_common(struct mutex *lock,
 	preempt_disable();
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
 
+	trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
 	if (__mutex_trylock(lock) ||
 	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
 		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
+		trace_contention_end(lock, 0);
 		preempt_enable();
 		return 0;
 	}
@@ -644,7 +646,7 @@ __mutex_lock_common(struct mutex *lock,
 	}
 
 	set_current_state(state);
-	trace_contention_begin(lock, 0);
+	trace_contention_begin(lock, LCB_F_MUTEX);
 	for (;;) {
 		bool first;
 
@@ -684,10 +686,16 @@ __mutex_lock_common(struct mutex *lock,
 		 * state back to RUNNING and fall through the next schedule(),
 		 * or we must see its unlock and acquire.
 		 */
-		if (__mutex_trylock_or_handoff(lock, first) ||
-		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
+		if (__mutex_trylock_or_handoff(lock, first))
 			break;
 
+		if (first) {
+			trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN);
+			if (mutex_optimistic_spin(lock, ww_ctx, &waiter))
+				break;
+			trace_contention_begin(lock, LCB_F_MUTEX);
+		}
+
 		raw_spin_lock(&lock->wait_lock);
 	}
 	raw_spin_lock(&lock->wait_lock);
@@ -723,8 +731,8 @@ __mutex_lock_common(struct mutex *lock,
 err:
 	__set_current_state(TASK_RUNNING);
 	__mutex_remove_waiter(lock, &waiter);
-	trace_contention_end(lock, ret);
 err_early_kill:
+	trace_contention_end(lock, ret);
 	raw_spin_unlock(&lock->wait_lock);
 	debug_mutex_free_waiter(&waiter);
 	mutex_release(&lock->dep_map, ip);
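
That also fixes the note above: every path out of __mutex_lock_common()
now passes a trace_contention_end() -- the __ww_mutex_add_waiter()
failure path jumps to err_early_kill, which is why the _end moves below
that label. It does mean one acquisition can emit several _begin events
before its single _end, with the flags of the most recent _begin
describing how the task is currently waiting; the consumer side has to
be okay with that.

Flag-wise this assumes something next to what patch 1/2 already
defines -- the actual value of LCB_F_MUTEX is of course up to you:

	#define LCB_F_SPIN	(1U << 0)	/* already in patch 1/2 */
	#define LCB_F_MUTEX	(1U << 5)	/* new; value picked arbitrarily here */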


