Patch "io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used" has been added to the 6.1-stable tree

This is a note to let you know that I've just added the patch titled

    io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used

to the 6.1-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     io_uring-sqpoll-fix-io-wq-affinity-when-ioring_setup_sqpoll-is-used.patch
and it can be found in the queue-6.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


From stable-owner@xxxxxxxxxxxxxxx Tue Sep 12 15:57:48 2023
From: Pavel Begunkov <asml.silence@xxxxxxxxx>
Date: Tue, 12 Sep 2023 14:57:07 +0100
Subject: io_uring/sqpoll: fix io-wq affinity when IORING_SETUP_SQPOLL is used
To: stable@xxxxxxxxxxxxxxx
Cc: Jens Axboe <axboe@xxxxxxxxx>, asml.silence@xxxxxxxxx
Message-ID: <2134e6af307e20b62a591aa57169277ab2cee0bb.1694486400.git.asml.silence@xxxxxxxxx>

From: Pavel Begunkov <asml.silence@xxxxxxxxx>

From: Jens Axboe <axboe@xxxxxxxxx>

[ upstream commit ebdfefc09c6de7897962769bd3e63a2ff443ebf5 ]

If we set up the ring with SQPOLL, then that polling thread has its
own io-wq setup. This means that if the application uses
IORING_REGISTER_IOWQ_AFF to set the io-wq affinity, we should not be
setting it for the invoking task, but rather the sqpoll task.

Add an sqpoll helper that parks the thread and updates the affinity,
and use that one if we're using SQPOLL.
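
For reference, a minimal userspace sketch of the registration path this
change redirects, assuming liburing and its io_uring_register_iowq_aff()
helper; with an SQPOLL ring, the mask below should now end up on the
sqpoll task's io-wq rather than on the submitting task's:

/*
 * Illustrative only: set up an SQPOLL ring, then register a CPU affinity
 * mask for its io-wq workers via IORING_REGISTER_IOWQ_AFF.
 * Error handling is abbreviated.
 */
#define _GNU_SOURCE
#include <liburing.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p;
	cpu_set_t mask;
	int ret;

	memset(&p, 0, sizeof(p));
	p.flags = IORING_SETUP_SQPOLL;	/* kernel-side submission thread */
	p.sq_thread_idle = 1000;	/* ms before the sqpoll thread idles */

	ret = io_uring_queue_init_params(8, &ring, &p);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %s\n", strerror(-ret));
		return 1;
	}

	/* Pin the ring's io-wq workers to CPU 0. */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = io_uring_register_iowq_aff(&ring, sizeof(mask), &mask);
	if (ret < 0)
		fprintf(stderr, "register_iowq_aff: %s\n", strerror(-ret));

	io_uring_queue_exit(&ring);
	return ret < 0 ? 1 : 0;
}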

Fixes: fe76421d1da1 ("io_uring: allow user configurable IO thread CPU affinity")
Cc: stable@xxxxxxxxxxxxxxx # 5.10+
Link: https://github.com/axboe/liburing/discussions/884
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Pavel Begunkov <asml.silence@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 io_uring/io-wq.c    |    7 +++++--
 io_uring/io-wq.h    |    2 +-
 io_uring/io_uring.c |   29 ++++++++++++++++++-----------
 io_uring/sqpoll.c   |   15 +++++++++++++++
 io_uring/sqpoll.h   |    1 +
 5 files changed, 40 insertions(+), 14 deletions(-)

--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -1350,13 +1350,16 @@ static int io_wq_cpu_offline(unsigned in
 	return __io_wq_cpu_online(wq, cpu, false);
 }
 
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask)
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask)
 {
 	int i;
 
+	if (!tctx || !tctx->io_wq)
+		return -EINVAL;
+
 	rcu_read_lock();
 	for_each_node(i) {
-		struct io_wqe *wqe = wq->wqes[i];
+		struct io_wqe *wqe = tctx->io_wq->wqes[i];
 
 		if (mask)
 			cpumask_copy(wqe->cpu_mask, mask);
--- a/io_uring/io-wq.h
+++ b/io_uring/io-wq.h
@@ -50,7 +50,7 @@ void io_wq_put_and_exit(struct io_wq *wq
 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
 void io_wq_hash_work(struct io_wq_work *work, void *val);
 
-int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
+int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
 int io_wq_max_workers(struct io_wq *wq, int *new_count);
 bool io_wq_worker_stopped(void);
 
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -3835,16 +3835,28 @@ static int io_register_enable_rings(stru
 	return 0;
 }
 
+static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
+					 cpumask_var_t new_mask)
+{
+	int ret;
+
+	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
+		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
+	} else {
+		mutex_unlock(&ctx->uring_lock);
+		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
+		mutex_lock(&ctx->uring_lock);
+	}
+
+	return ret;
+}
+
 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
 				       void __user *arg, unsigned len)
 {
-	struct io_uring_task *tctx = current->io_uring;
 	cpumask_var_t new_mask;
 	int ret;
 
-	if (!tctx || !tctx->io_wq)
-		return -EINVAL;
-
 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
 		return -ENOMEM;
 
@@ -3865,19 +3877,14 @@ static __cold int io_register_iowq_aff(s
 		return -EFAULT;
 	}
 
-	ret = io_wq_cpu_affinity(tctx->io_wq, new_mask);
+	ret = __io_register_iowq_aff(ctx, new_mask);
 	free_cpumask_var(new_mask);
 	return ret;
 }
 
 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
 {
-	struct io_uring_task *tctx = current->io_uring;
-
-	if (!tctx || !tctx->io_wq)
-		return -EINVAL;
-
-	return io_wq_cpu_affinity(tctx->io_wq, NULL);
+	return __io_register_iowq_aff(ctx, NULL);
 }
 
 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
--- a/io_uring/sqpoll.c
+++ b/io_uring/sqpoll.c
@@ -423,3 +423,18 @@ err:
 	io_sq_thread_finish(ctx);
 	return ret;
 }
+
+__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
+				     cpumask_var_t mask)
+{
+	struct io_sq_data *sqd = ctx->sq_data;
+	int ret = -EINVAL;
+
+	if (sqd) {
+		io_sq_thread_park(sqd);
+		ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
+		io_sq_thread_unpark(sqd);
+	}
+
+	return ret;
+}
--- a/io_uring/sqpoll.h
+++ b/io_uring/sqpoll.h
@@ -27,3 +27,4 @@ void io_sq_thread_park(struct io_sq_data
 void io_sq_thread_unpark(struct io_sq_data *sqd);
 void io_put_sq_data(struct io_sq_data *sqd);
 int io_sqpoll_wait_sq(struct io_ring_ctx *ctx);
+int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask);


Patches currently in stable-queue which might be from stable-owner@xxxxxxxxxxxxxxx are

queue-6.1/io_uring-don-t-set-affinity-on-a-dying-sqpoll-thread.patch
queue-6.1/io_uring-always-lock-in-io_apoll_task_func.patch
queue-6.1/io_uring-revert-io_uring-fix-multishot-accept-ordering.patch
queue-6.1/io_uring-net-don-t-overflow-multishot-accept.patch
queue-6.1/io_uring-sqpoll-fix-io-wq-affinity-when-ioring_setup_sqpoll-is-used.patch
queue-6.1/io_uring-break-out-of-iowq-iopoll-on-teardown.patch


