6.1-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Felix Moessbauer <felix.moessbauer@xxxxxxxxxxx>

commit 84eacf177faa605853c58e5b1c0d9544b88c16fd upstream.

The io worker threads are userland threads that just never exit to
userland. As a result, they are also assigned to a cgroup (the group of
the creating task).

When creating a new io worker, this worker should inherit the cpuset
of the cgroup.

Fixes: da64d6db3bd3 ("io_uring: One wqe per wq")
Signed-off-by: Felix Moessbauer <felix.moessbauer@xxxxxxxxxxx>
Link: https://lore.kernel.org/r/20240910171157.166423-3-felix.moessbauer@xxxxxxxxxxx
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 io_uring/io-wq.c |    8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

--- a/io_uring/io-wq.c
+++ b/io_uring/io-wq.c
@@ -1157,6 +1157,7 @@ struct io_wq *io_wq_create(unsigned boun
 {
 	int ret, node, i;
 	struct io_wq *wq;
+	cpumask_var_t allowed_mask;
 
 	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
 		return ERR_PTR(-EINVAL);
@@ -1176,6 +1177,9 @@ struct io_wq *io_wq_create(unsigned boun
 	wq->do_work = data->do_work;
 
 	ret = -ENOMEM;
+	if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
+		goto err;
+	cpuset_cpus_allowed(current, allowed_mask);
 	for_each_node(node) {
 		struct io_wqe *wqe;
 		int alloc_node = node;
@@ -1188,7 +1192,8 @@ struct io_wq *io_wq_create(unsigned boun
 		wq->wqes[node] = wqe;
 		if (!alloc_cpumask_var(&wqe->cpu_mask, GFP_KERNEL))
 			goto err;
-		cpumask_copy(wqe->cpu_mask, cpumask_of_node(node));
+		if (!cpumask_and(wqe->cpu_mask, cpumask_of_node(node), allowed_mask))
+			cpumask_copy(wqe->cpu_mask, allowed_mask);
 		wqe->node = alloc_node;
 		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
 		wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
@@ -1222,6 +1227,7 @@ err:
 		free_cpumask_var(wq->wqes[node]->cpu_mask);
 		kfree(wq->wqes[node]);
 	}
+	free_cpumask_var(allowed_mask);
 err_wq:
 	kfree(wq);
 	return ERR_PTR(ret);
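
For context only, not part of the patch to apply: the sketch below isolates the
mask-derivation pattern the hunks above add to io_wq_create(). It intersects a
NUMA node's CPUs with the creating task's cpuset and falls back to the full
cpuset mask when the intersection is empty. The helper name
io_wqe_init_cpu_mask() is hypothetical; the kernel APIs used
(alloc_cpumask_var(), cpuset_cpus_allowed(), cpumask_and(), cpumask_copy(),
cpumask_of_node()) are the same ones the patch relies on.

#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/topology.h>

/* Hypothetical helper mirroring the per-node mask setup in io_wq_create(). */
static int io_wqe_init_cpu_mask(struct cpumask *cpu_mask, int node)
{
	cpumask_var_t allowed_mask;

	if (!alloc_cpumask_var(&allowed_mask, GFP_KERNEL))
		return -ENOMEM;

	/* CPUs the creating task may run on according to its cpuset. */
	cpuset_cpus_allowed(current, allowed_mask);

	/*
	 * Restrict the node's CPUs to the cpuset.  cpumask_and() returns
	 * false when the intersection is empty; in that case fall back to
	 * the cpuset mask so the worker is never left without any CPU.
	 */
	if (!cpumask_and(cpu_mask, cpumask_of_node(node), allowed_mask))
		cpumask_copy(cpu_mask, allowed_mask);

	free_cpumask_var(allowed_mask);
	return 0;
}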