Jens Axboe <axboe@xxxxxxxxx> writes: > - TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%x, %s queue, work %p", > + TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%lx, %s queue, work %p", > __entry->ctx, __entry->req, __entry->user_data, > - __get_str(op_str), > - __entry->flags, __entry->rw ? "hashed" : "normal", __entry->work) > + __get_str(op_str), (long) __entry->flags, Hi Jens, Minor, but on a 32-bit kernel the cast is wrong since sizeof(long)==4. Afaik, io_uring still builds on 32-bit archs. If you use (unsigned long long), it will be 64 bits everywhere. > + __entry->rw ? "hashed" : "normal", __entry->work) > ); > > /** > @@ -378,7 +378,7 @@ TRACE_EVENT(io_uring_submit_req, > __field( void *, req ) > __field( unsigned long long, user_data ) > __field( u8, opcode ) > - __field( u32, flags ) > + __field( io_req_flags_t, flags ) > __field( bool, sq_thread ) > > __string( op_str, io_uring_get_opcode(req->opcode) ) > @@ -395,10 +395,10 @@ TRACE_EVENT(io_uring_submit_req, > __assign_str(op_str, io_uring_get_opcode(req->opcode)); > ), > > - TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%x, " > + TP_printk("ring %p, req %p, user_data 0x%llx, opcode %s, flags 0x%lx, " > "sq_thread %d", __entry->ctx, __entry->req, > __entry->user_data, __get_str(op_str), > - __entry->flags, __entry->sq_thread) > + (long) __entry->flags, __entry->sq_thread) likewise. 
> ); > > /* > diff --git a/io_uring/filetable.h b/io_uring/filetable.h > index b47adf170c31..b2435c4dca1f 100644 > --- a/io_uring/filetable.h > +++ b/io_uring/filetable.h > @@ -17,7 +17,7 @@ int io_fixed_fd_remove(struct io_ring_ctx *ctx, unsigned int offset); > int io_register_file_alloc_range(struct io_ring_ctx *ctx, > struct io_uring_file_index_range __user *arg); > > -unsigned int io_file_get_flags(struct file *file); > +io_req_flags_t io_file_get_flags(struct file *file); > > static inline void io_file_bitmap_clear(struct io_file_table *table, int bit) > { > diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c > index cd9a137ad6ce..b8ca907b77eb 100644 > --- a/io_uring/io_uring.c > +++ b/io_uring/io_uring.c > @@ -1768,9 +1768,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) > } > } > > -unsigned int io_file_get_flags(struct file *file) > +io_req_flags_t io_file_get_flags(struct file *file) > { > - unsigned int res = 0; > + io_req_flags_t res = 0; > > if (S_ISREG(file_inode(file)->i_mode)) > res |= REQ_F_ISREG; > @@ -2171,7 +2171,8 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, > /* req is partially pre-initialised, see io_preinit_req() */ > req->opcode = opcode = READ_ONCE(sqe->opcode); > /* same numerical values with corresponding REQ_F_*, safe to copy */ > - req->flags = sqe_flags = READ_ONCE(sqe->flags); > + sqe_flags = READ_ONCE(sqe->flags); Did you consider that READ_ONCE won't protect from load tearing of the userspace value on 32-bit architectures? It builds silently, though, and I suspect it is mostly fine in the current code, but might become a bug eventually. Thanks, -- Gabriel Krisman Bertazi