On 5/31/21 7:36 AM, Olivier Langlois wrote:
> The req pointer uniquely identifies a specific request.
> Having it in traces can provide valuable insights that are not possible
> to have if the calling process is reusing the same user_data value.

Patchsets are usually accompanied by a cover letter. Also, there is
something wrong, as b4 refuses to find the patch. Anyway...
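FWIW, it's easy to see what the extra field buys. Two NOPs queued with
the same user_data are indistinguishable in the old trace output, while
the req pointers added here tell them apart. A minimal sketch, assuming
liburing is installed (build with -luring) and tracefs is mounted at
/sys/kernel/tracing:

#include <liburing.h>
#include <stdint.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_cqe *cqe;
	int i;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	for (i = 0; i < 2; i++) {
		struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);

		io_uring_prep_nop(sqe);
		/* both requests deliberately reuse one user_data value */
		io_uring_sqe_set_data(sqe, (void *)(uintptr_t)0xdeadbeef);
	}
	io_uring_submit(&ring);

	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe) == 0)
			io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}

With events/io_uring/io_uring_submit_sqe enabled, the old format prints
two submissions differing only in timestamp; with this patch the two
distinct req values disambiguate them.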
Reviewed-by: Pavel Begunkov <asml.silence@xxxxxxxxx>

> Signed-off-by: Olivier Langlois <olivier@xxxxxxxxxxxxxx>
> ---
>  fs/io_uring.c                   | 11 ++---
>  include/trace/events/io_uring.h | 71 ++++++++++++++++++++++++---------
>  2 files changed, 59 insertions(+), 23 deletions(-)
> 
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 903458afd56c..0737b0e76b91 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -5059,7 +5059,7 @@ static void io_async_task_func(struct callback_head *cb)
>  	struct async_poll *apoll = req->apoll;
>  	struct io_ring_ctx *ctx = req->ctx;
>  
> -	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
> +	trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
>  
>  	if (io_poll_rewait(req, &apoll->poll)) {
>  		spin_unlock_irq(&ctx->completion_lock);
> @@ -5192,8 +5192,8 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
>  		return false;
>  	}
>  	spin_unlock_irq(&ctx->completion_lock);
> -	trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
> -				apoll->poll.events);
> +	trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data,
> +				mask, apoll->poll.events);
>  	return true;
>  }
>  
> @@ -6578,8 +6578,9 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
>  		goto fail_req;
>  
>  	/* don't need @sqe from now on */
> -	trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
> -				  true, ctx->flags & IORING_SETUP_SQPOLL);
> +	trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data,
> +				  req->flags, true,
> +				  ctx->flags & IORING_SETUP_SQPOLL);
>  
>  	/*
>  	 * If we already have a head request, queue this one for async
> diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
> index abb8b24744fd..12addad1f837 100644
> --- a/include/trace/events/io_uring.h
> +++ b/include/trace/events/io_uring.h
> @@ -323,8 +323,10 @@ TRACE_EVENT(io_uring_complete,
>   * io_uring_submit_sqe - called before submitting one SQE
>   *
>   * @ctx:		pointer to a ring context structure
> + * @req:		pointer to a submitted request
>   * @opcode:		opcode of request
>   * @user_data:		user data associated with the request
> + * @flags:		request flags
>   * @force_nonblock:	whether a context blocking or not
>   * @sq_thread:		true if sq_thread has submitted this SQE
>   *
> @@ -333,41 +335,60 @@ TRACE_EVENT(io_uring_complete,
>   */
>  TRACE_EVENT(io_uring_submit_sqe,
>  
> -	TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
> -		 bool sq_thread),
> +	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data, u32 flags,
> +		 bool force_nonblock, bool sq_thread),
>  
> -	TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),
> +	TP_ARGS(ctx, req, opcode, user_data, flags, force_nonblock, sq_thread),
>  
>  	TP_STRUCT__entry (
>  		__field(  void *,	ctx		)
> +		__field(  void *,	req		)
>  		__field(  u8,		opcode		)
>  		__field(  u64,		user_data	)
> +		__field(  u32,		flags		)
>  		__field(  bool,		force_nonblock	)
>  		__field(  bool,		sq_thread	)
>  	),
>  
>  	TP_fast_assign(
>  		__entry->ctx		= ctx;
> +		__entry->req		= req;
>  		__entry->opcode		= opcode;
>  		__entry->user_data	= user_data;
> +		__entry->flags		= flags;
>  		__entry->force_nonblock	= force_nonblock;
>  		__entry->sq_thread	= sq_thread;
>  	),
>  
> -	TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
> -		  __entry->ctx, __entry->opcode,
> -		  (unsigned long long) __entry->user_data,
> -		  __entry->force_nonblock, __entry->sq_thread)
> +	TP_printk("ring %p, req %p, op %d, data 0x%llx, flags %u, "
> +		  "non block %d, sq_thread %d", __entry->ctx, __entry->req,
> +		  __entry->opcode, (unsigned long long)__entry->user_data,
> +		  __entry->flags, __entry->force_nonblock, __entry->sq_thread)
>  );
>  
> +/*
> + * io_uring_poll_arm - called after arming a poll wait if successful
> + *
> + * @ctx:		pointer to a ring context structure
> + * @req:		pointer to the armed request
> + * @opcode:		opcode of request
> + * @user_data:		user data associated with the request
> + * @mask:		request poll events mask
> + * @events:		registered events of interest
> + *
> + * Allows to track which fds are being waited on and what events are of
> + * interest.
> + */
>  TRACE_EVENT(io_uring_poll_arm,
>  
> -	TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events),
> +	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data,
> +		 int mask, int events),
>  
> -	TP_ARGS(ctx, opcode, user_data, mask, events),
> +	TP_ARGS(ctx, req, opcode, user_data, mask, events),
>  
>  	TP_STRUCT__entry (
>  		__field(  void *,	ctx		)
> +		__field(  void *,	req		)
>  		__field(  u8,		opcode		)
>  		__field(  u64,		user_data	)
>  		__field(  int,		mask		)
> @@ -376,16 +397,17 @@ TRACE_EVENT(io_uring_poll_arm,
>  
>  	TP_fast_assign(
>  		__entry->ctx		= ctx;
> +		__entry->req		= req;
>  		__entry->opcode		= opcode;
>  		__entry->user_data	= user_data;
>  		__entry->mask		= mask;
>  		__entry->events		= events;
>  	),
>  
> -	TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
> -		  __entry->ctx, __entry->opcode,
> -		  (unsigned long long) __entry->user_data,
> -		  __entry->mask, __entry->events)
> +	TP_printk("ring %p, req %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
> +		  __entry->ctx, __entry->req, __entry->opcode,
> +		  (unsigned long long) __entry->user_data,
> +		  __entry->mask, __entry->events)
>  );
>  
>  TRACE_EVENT(io_uring_poll_wake,
> @@ -440,27 +462,40 @@ TRACE_EVENT(io_uring_task_add,
>  		  __entry->mask)
>  );
>  
> +/*
> + * io_uring_task_run - called when task_work_run() executes the poll events
> + *                     notification callbacks
> + *
> + * @ctx:		pointer to a ring context structure
> + * @req:		pointer to the armed request
> + * @opcode:		opcode of request
> + * @user_data:		user data associated with the request
> + *
> + * Allows to track when notified poll events are processed
> + */
>  TRACE_EVENT(io_uring_task_run,
>  
> -	TP_PROTO(void *ctx, u8 opcode, u64 user_data),
> +	TP_PROTO(void *ctx, void *req, u8 opcode, u64 user_data),
>  
> -	TP_ARGS(ctx, opcode, user_data),
> +	TP_ARGS(ctx, req, opcode, user_data),
>  
>  	TP_STRUCT__entry (
>  		__field(  void *,	ctx		)
> +		__field(  void *,	req		)
>  		__field(  u8,		opcode		)
>  		__field(  u64,		user_data	)
>  	),
>  
>  	TP_fast_assign(
>  		__entry->ctx		= ctx;
> +		__entry->req		= req;
>  		__entry->opcode		= opcode;
>  		__entry->user_data	= user_data;
>  	),
>  
> -	TP_printk("ring %p, op %d, data 0x%llx",
> -		  __entry->ctx, __entry->opcode,
> -		  (unsigned long long) __entry->user_data)
> +	TP_printk("ring %p, req %p, op %d, data 0x%llx",
> +		  __entry->ctx, __entry->req, __entry->opcode,
> +		  (unsigned long long) __entry->user_data)
>  );
>  
>  #endif /* _TRACE_IO_URING_H */
> 

-- 
Pavel Begunkov