Currently we unconditionally account time spent waiting for events in
the CQ ring as iowait time. Some userspace tools consider iowait time
to be CPU utilization/load, which can be misleading since the process
is actually sleeping. High iowait time might be indicative of issues
for storage IO, but for network IO, e.g. socket recv(), we do not
control when completions happen, so iowait values mislead userspace
tooling.

Gate the previously unconditional iowait accounting behind a new
IORING_REGISTER opcode, IORING_REGISTER_IOWAIT. By default, time is
not accounted as iowait unless this is explicitly enabled for a ring.
Thus userspace can decide, depending on the type of work it expects to
do, whether it wants to consider cqring wait time as iowait or not.

Signed-off-by: David Wei <dw@xxxxxxxxxxx>
---
 include/linux/io_uring_types.h |  1 +
 include/uapi/linux/io_uring.h  |  3 +++
 io_uring/io_uring.c            |  9 +++++----
 io_uring/register.c            | 17 +++++++++++++++++
 4 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index bd7071aeec5d..c568e6b8c9f9 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -242,6 +242,7 @@ struct io_ring_ctx {
 		unsigned int		drain_disabled: 1;
 		unsigned int		compat: 1;
 		unsigned int		iowq_limits_set : 1;
+		unsigned int		iowait_enabled: 1;
 
 	struct task_struct	*submitter_task;
 	struct io_rings		*rings;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 7bd10201a02b..b068898c2283 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -575,6 +575,9 @@ enum {
 	IORING_REGISTER_NAPI		= 27,
 	IORING_UNREGISTER_NAPI		= 28,
 
+	/* account time spent in cqring wait as iowait */
+	IORING_REGISTER_IOWAIT		= 29,
+
 	/* this goes last */
 	IORING_REGISTER_LAST,
 
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index cf2f514b7cc0..7f8d2a03cce6 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2533,12 +2533,13 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
 		return 0;
 
 	/*
-	 * Mark us as being in io_wait if we have pending requests, so cpufreq
-	 * can take into account that the task is waiting for IO - turns out
-	 * to be important for low QD IO.
+	 * Mark us as being in io_wait if we have pending requests and this
+	 * is enabled via IORING_REGISTER_IOWAIT, so cpufreq can take into
+	 * account that the task is waiting for IO - turns out to be
+	 * important for low QD IO.
 	 */
 	io_wait = current->in_iowait;
-	if (current_pending_io())
+	if (ctx->iowait_enabled && current_pending_io())
 		current->in_iowait = 1;
 	ret = 0;
 	if (iowq->timeout == KTIME_MAX)
diff --git a/io_uring/register.c b/io_uring/register.c
index 99c37775f974..fbdf3d3461d8 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -387,6 +387,17 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
 	return ret;
 }
 
+static int io_register_iowait(struct io_ring_ctx *ctx, int val)
+{
+	int was_enabled = ctx->iowait_enabled;
+
+	if (val)
+		ctx->iowait_enabled = 1;
+	else
+		ctx->iowait_enabled = 0;
+	return was_enabled;
+}
+
 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			       void __user *arg, unsigned nr_args)
 	__releases(ctx->uring_lock)
@@ -563,6 +574,12 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_unregister_napi(ctx, arg);
 		break;
+	case IORING_REGISTER_IOWAIT:
+		ret = -EINVAL;
+		if (arg)
+			break;
+		ret = io_register_iowait(ctx, nr_args);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
-- 
2.43.0
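
For illustration, a minimal userspace sketch of how a ring could opt
in under this patch. The helper name ring_set_iowait is hypothetical,
ring_fd is assumed to come from io_uring_setup() elsewhere, and since
liburing would have no wrapper for this proposed opcode, the raw
io_uring_register syscall is used; the fallback #define just mirrors
the value added above:

/*
 * Hypothetical helper, not part of the patch: enable or disable
 * iowait accounting for an existing ring. Assumes a kernel with
 * this patch applied.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

#ifndef IORING_REGISTER_IOWAIT
#define IORING_REGISTER_IOWAIT	29	/* value proposed by this patch */
#endif

static int ring_set_iowait(int ring_fd, unsigned int enable)
{
	/*
	 * arg must be NULL (the call fails with EINVAL otherwise);
	 * the on/off value is carried in nr_args. On success the
	 * previous setting (0 or 1) is returned, -1/errno on error.
	 */
	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_IOWAIT, NULL, enable);
}

With this, a storage-heavy application might call
ring_set_iowait(ring_fd, 1) once after ring setup to keep the old
cpufreq-friendly behaviour, while e.g. a network proxy would leave the
new default (no iowait accounting) in place.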