On Fri, Mar 18, 2022 at 08:55:32PM +0800, Boqun Feng wrote:
> On Wed, Mar 16, 2022 at 03:45:48PM -0700, Namhyung Kim wrote:
> [...]
> > @@ -209,6 +210,7 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
> >  						long timeout)
> >  {
> >  	struct semaphore_waiter waiter;
> > +	bool tracing = false;
> > 
> >  	list_add_tail(&waiter.list, &sem->wait_list);
> >  	waiter.task = current;
> > @@ -220,18 +222,28 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
> >  		if (unlikely(timeout <= 0))
> >  			goto timed_out;
> >  		__set_current_state(state);
> > +		if (!tracing) {
> > +			trace_contention_begin(sem, 0);
> 
> This looks a little ugly ;-/

I agree this can be simplified a bit.

> Maybe we can rename __down_common() to
> ___down_common() and implement __down_common() as:
> 
> 	static inline int __sched __down_common(...)
> 	{
> 		int ret;
> 		trace_contention_begin(sem, 0);
> 		ret = ___down_common(...);
> 		trace_contention_end(sem, ret);
> 		return ret;
> 	}
> 
> Thoughts?
> 

But IMO inlining tracepoints is generally not a good idea.
Will increase kernel size a lot.

> Regards,
> Boqun
> 
> > +			tracing = true;
> > +		}
> >  		raw_spin_unlock_irq(&sem->lock);
> >  		timeout = schedule_timeout(timeout);
> >  		raw_spin_lock_irq(&sem->lock);
> > -		if (waiter.up)
> > +		if (waiter.up) {
> > +			trace_contention_end(sem, 0);
> >  			return 0;
> > +		}
> >  	}
> > 
> >  timed_out:
> > +	if (tracing)
> > +		trace_contention_end(sem, -ETIME);
> >  	list_del(&waiter.list);
> >  	return -ETIME;
> > 
> >  interrupted:
> > +	if (tracing)
> > +		trace_contention_end(sem, -EINTR);
> >  	list_del(&waiter.list);
> >  	return -EINTR;
> >  }
> > -- 
> > 2.35.1.894.gb6a874cedc-goog
> > 
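
To make that concrete, here is a rough, completely untested sketch on top
of Boqun's suggested ___down_common() rename, just with the wrapper taken
out of line (noinline is only for illustration):

	/* current __down_common() body, minus the tracepoints */
	static int __sched ___down_common(struct semaphore *sem, long state,
					  long timeout);

	/* single out-of-line copy of the tracepoint calls */
	static noinline int __sched __down_common(struct semaphore *sem,
						  long state, long timeout)
	{
		int ret;

		trace_contention_begin(sem, 0);
		ret = ___down_common(sem, state, timeout);
		trace_contention_end(sem, ret);

		return ret;
	}

That way __down(), __down_interruptible(), __down_killable() and
__down_timeout() don't each get their own inlined copy of the tracepoint
code, and the extra call only happens on the contended slow path anyway.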