From: Chuck Lever <chuck.lever@xxxxxxxxxx>

Distinguish between the case where new work was picked up just by
looking at the transport queue and the case where the thread had to
be awoken. This gives us better visibility into how well-utilized
the thread pool is.

Signed-off-by: Chuck Lever <chuck.lever@xxxxxxxxxx>
---
 include/trace/events/sunrpc.h |   47 +++++++++++++++++++++++++++++++----------
 net/sunrpc/svc_xprt.c         |    9 +++++---
 2 files changed, 41 insertions(+), 15 deletions(-)

diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 9b70fc1c698a..2e83887b58cd 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -2015,34 +2015,57 @@ TRACE_EVENT(svc_xprt_enqueue,
 		SVC_XPRT_ENDPOINT_VARARGS, __entry->pid)
 );
 
-TRACE_EVENT(svc_xprt_dequeue,
+#define show_svc_pool_flags(x)						\
+	__print_flags(x, "|",						\
+		{ BIT(SP_TASK_PENDING),		"TASK_PENDING" },	\
+		{ BIT(SP_CONGESTED),		"CONGESTED" })
+
+DECLARE_EVENT_CLASS(svc_pool_scheduler_class,
 	TP_PROTO(
-		const struct svc_rqst *rqst
+		const struct svc_rqst *rqstp
 	),
 
-	TP_ARGS(rqst),
+	TP_ARGS(rqstp),
 
 	TP_STRUCT__entry(
-		SVC_XPRT_ENDPOINT_FIELDS(rqst->rq_xprt)
+		SVC_XPRT_ENDPOINT_FIELDS(rqstp->rq_xprt)
 
+		__string(name, rqstp->rq_server->sv_name)
+		__field(int, pool_id)
+		__field(unsigned int, nrthreads)
+		__field(unsigned long, pool_flags)
 		__field(unsigned long, wakeup)
 	),
 
 	TP_fast_assign(
-		SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqst->rq_xprt);
+		struct svc_pool *pool = rqstp->rq_pool;
 
+		SVC_XPRT_ENDPOINT_ASSIGNMENTS(rqstp->rq_xprt);
+		__assign_str(name, rqstp->rq_server->sv_name);
+		__entry->pool_id = pool->sp_id;
+		__entry->nrthreads = pool->sp_nrthreads;
+		__entry->pool_flags = pool->sp_flags;
 		__entry->wakeup = ktime_to_us(ktime_sub(ktime_get(),
-							rqst->rq_qtime));
+							rqstp->rq_qtime));
 	),
 
-	TP_printk(SVC_XPRT_ENDPOINT_FORMAT " wakeup-us=%lu",
-		SVC_XPRT_ENDPOINT_VARARGS, __entry->wakeup)
+	TP_printk(SVC_XPRT_ENDPOINT_FORMAT
+		" service=%s pool=%d pool_flags=%s nrthreads=%u wakeup-us=%lu",
+		SVC_XPRT_ENDPOINT_VARARGS, __get_str(name), __entry->pool_id,
+		show_svc_pool_flags(__entry->pool_flags), __entry->nrthreads,
+		__entry->wakeup
+	)
 );
 
-#define show_svc_pool_flags(x)						\
-	__print_flags(x, "|",						\
-		{ BIT(SP_TASK_PENDING),		"TASK_PENDING" },	\
-		{ BIT(SP_CONGESTED),		"CONGESTED" })
+#define DEFINE_SVC_POOL_SCHEDULER_EVENT(name) \
+	DEFINE_EVENT(svc_pool_scheduler_class, svc_pool_##name, \
+			TP_PROTO( \
+				const struct svc_rqst *rqstp \
+			), \
+			TP_ARGS(rqstp))
+
+DEFINE_SVC_POOL_SCHEDULER_EVENT(polled);
+DEFINE_SVC_POOL_SCHEDULER_EVENT(awoken);
 
 TRACE_EVENT(svc_pool_starved,
 	TP_PROTO(
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index a1ed6fb69793..7ee095d03996 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -744,8 +744,10 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 	WARN_ON_ONCE(rqstp->rq_xprt);
 
 	rqstp->rq_xprt = svc_xprt_dequeue(pool);
-	if (rqstp->rq_xprt)
+	if (rqstp->rq_xprt) {
+		trace_svc_pool_polled(rqstp);
 		goto out_found;
+	}
 
 	/*
 	 * We have to be able to interrupt this wait
@@ -767,8 +769,10 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 	set_bit(RQ_BUSY, &rqstp->rq_flags);
 	smp_mb__after_atomic();
 	rqstp->rq_xprt = svc_xprt_dequeue(pool);
-	if (rqstp->rq_xprt)
+	if (rqstp->rq_xprt) {
+		trace_svc_pool_awoken(rqstp);
 		goto out_found;
+	}
 
 	if (!time_left)
 		percpu_counter_inc(&pool->sp_threads_timedout);
@@ -784,7 +788,6 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
 		rqstp->rq_chandle.thread_wait = 5*HZ;
 	else
 		rqstp->rq_chandle.thread_wait = 1*HZ;
-	trace_svc_xprt_dequeue(rqstp);
 	return rqstp->rq_xprt;
 }
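
A quick way to observe the new events, sketched under two assumptions
that are not part of the patch itself: tracefs is mounted at the usual
/sys/kernel/tracing, and the two DEFINE_EVENT instances register under
the "sunrpc" trace system as events/sunrpc/svc_pool_{polled,awoken}:

	# Enable only the two new pool-scheduler events; the names come
	# from DEFINE_SVC_POOL_SCHEDULER_EVENT(polled) and (awoken).
	cd /sys/kernel/tracing
	echo 1 > events/sunrpc/svc_pool_polled/enable
	echo 1 > events/sunrpc/svc_pool_awoken/enable

	# After driving some RPC traffic, compare how often a thread found
	# work by polling the transport queue versus after being woken.
	grep -c svc_pool_polled trace
	grep -c svc_pool_awoken trace

A high polled:awoken ratio would suggest the pool is kept busy enough
that threads rarely sleep between requests; the reverse would suggest
threads mostly wait to be awoken.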