There may be a different cost for reading just one byte or more, so it's beneficial to keep ctx flag bits that we access together in a single byte. That affected code generation of __io_cq_unlock_post_flush() and removed one memory load. Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx> --- include/linux/io_uring_types.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index cc0cf0705b8f..0efe4d784358 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -196,17 +196,17 @@ struct io_ring_ctx { /* const or read-mostly hot data */ struct { unsigned int flags; - unsigned int compat: 1; unsigned int drain_next: 1; unsigned int restricted: 1; unsigned int off_timeout_used: 1; unsigned int drain_active: 1; - unsigned int drain_disabled: 1; unsigned int has_evfd: 1; - unsigned int syscall_iopoll: 1; /* all CQEs should be posted only by the submitter task */ unsigned int task_complete: 1; + unsigned int syscall_iopoll: 1; unsigned int poll_activated: 1; + unsigned int drain_disabled: 1; + unsigned int compat: 1; enum task_work_notify_mode notify_method; struct io_rings *rings; -- 2.38.1