This is a note to let you know that I've just added the patch titled

    cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock

to the 4.19-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:

    cgroup-fix-threadgroup_rwsem-cpus_read_lock-deadlock.patch

and it can be found in the queue-4.19 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


>From caixinchen1@xxxxxxxxxx Mon Mar 20 02:21:15 2023
From: Cai Xinchen <caixinchen1@xxxxxxxxxx>
Date: Mon, 20 Mar 2023 01:15:06 +0000
Subject: cgroup: Fix threadgroup_rwsem <-> cpus_read_lock() deadlock
To: <longman@xxxxxxxxxx>, <lizefan.x@xxxxxxxxxxxxx>, <tj@xxxxxxxxxx>, <hannes@xxxxxxxxxxx>, <gregkh@xxxxxxxxxxxxxxxxxxx>, <sashal@xxxxxxxxxx>
Cc: <mkoutny@xxxxxxxx>, <zhangqiao22@xxxxxxxxxx>, <juri.lelli@xxxxxxxxxx>, <penguin-kernel@xxxxxxxxxxxxxxxxxxx>, <stable@xxxxxxxxxxxxxxx>, <cgroups@xxxxxxxxxxxxxxx>, <linux-kernel@xxxxxxxxxxxxxxx>
Message-ID: <20230320011507.129441-3-caixinchen1@xxxxxxxxxx>

From: Tejun Heo <tj@xxxxxxxxxx>

commit 4f7e7236435ca0abe005c674ebd6892c6e83aeb3 upstream.

Add #include <linux/cpu.h> to avoid a compile error on some architectures.
Commit 9a3284fad42f6 ("cgroup: Optimize single thread migration") and
commit 671c11f0619e5 ("cgroup: Elide write-locking threadgroup_rwsem when
updating csses on an empty subtree") are not backported, so the input
parameter of cgroup_attach_lock()/cgroup_attach_unlock() is dropped here.

Original commit message:

Bringing up a CPU may involve creating and destroying tasks which requires
read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
cpus_read_lock(). However, cpuset's ->attach(), which may be called with
threadgroup_rwsem write-locked, also wants to disable CPU hotplug and
acquires cpus_read_lock(), leading to a deadlock.

Fix it by guaranteeing that ->attach() is always called with CPU hotplug
disabled and removing the cpus_read_lock() call from cpuset_attach().

Signed-off-by: Tejun Heo <tj@xxxxxxxxxx>
Reviewed-and-tested-by: Imran Khan <imran.f.khan@xxxxxxxxxx>
Reported-and-tested-by: Xuewen Yan <xuewen.yan@xxxxxxxxxx>
Fixes: 05c7b7a92cc8 ("cgroup/cpuset: Fix a race between cpuset_attach() and cpu hotplug")
Cc: stable@xxxxxxxxxxxxxxx # v5.17+
Signed-off-by: Cai Xinchen <caixinchen1@xxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
v2:
 - Add #include <linux/cpu.h> in kernel/cgroup/cgroup.c to avoid a compile
   error on some architectures

Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 kernel/cgroup/cgroup.c |   50 ++++++++++++++++++++++++++++++++++++++++++++-----
 kernel/cgroup/cpuset.c |    7 ------
 2 files changed, 46 insertions(+), 11 deletions(-)

--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -55,6 +55,7 @@
 #include <linux/nsproxy.h>
 #include <linux/file.h>
 #include <linux/sched/cputime.h>
+#include <linux/cpu.h>
 #include <net/sock.h>
 
 #define CREATE_TRACE_POINTS
@@ -2210,6 +2211,45 @@ int task_cgroup_path(struct task_struct
 EXPORT_SYMBOL_GPL(task_cgroup_path);
 
 /**
+ * cgroup_attach_lock - Lock for ->attach()
+ * @lock_threadgroup: whether to down_write cgroup_threadgroup_rwsem
+ *
+ * cgroup migration sometimes needs to stabilize threadgroups against forks and
+ * exits by write-locking cgroup_threadgroup_rwsem. However, some ->attach()
+ * implementations (e.g. cpuset), also need to disable CPU hotplug.
+ * Unfortunately, letting ->attach() operations acquire cpus_read_lock() can
+ * lead to deadlocks.
+ *
+ * Bringing up a CPU may involve creating and destroying tasks which requires
+ * read-locking threadgroup_rwsem, so threadgroup_rwsem nests inside
+ * cpus_read_lock(). If we call an ->attach() which acquires the cpus lock while
+ * write-locking threadgroup_rwsem, the locking order is reversed and we end up
+ * waiting for an on-going CPU hotplug operation which in turn is waiting for
+ * the threadgroup_rwsem to be released to create new tasks. For more details:
+ *
+ * http://lkml.kernel.org/r/20220711174629.uehfmqegcwn2lqzu@wubuntu
+ *
+ * Resolve the situation by always acquiring cpus_read_lock() before optionally
+ * write-locking cgroup_threadgroup_rwsem. This allows ->attach() to assume that
+ * CPU hotplug is disabled on entry.
+ */
+static void cgroup_attach_lock(void)
+{
+	get_online_cpus();
+	percpu_down_write(&cgroup_threadgroup_rwsem);
+}
+
+/**
+ * cgroup_attach_unlock - Undo cgroup_attach_lock()
+ * @lock_threadgroup: whether to up_write cgroup_threadgroup_rwsem
+ */
+static void cgroup_attach_unlock(void)
+{
+	percpu_up_write(&cgroup_threadgroup_rwsem);
+	put_online_cpus();
+}
+
+/**
  * cgroup_migrate_add_task - add a migration target task to a migration context
  * @task: target task
  * @mgctx: target migration context
@@ -2694,7 +2734,7 @@ struct task_struct *cgroup_procs_write_s
 	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
 		return ERR_PTR(-EINVAL);
 
-	percpu_down_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_lock();
 
 	rcu_read_lock();
 	if (pid) {
@@ -2725,7 +2765,7 @@ struct task_struct *cgroup_procs_write_s
 	goto out_unlock_rcu;
 
 out_unlock_threadgroup:
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock();
 out_unlock_rcu:
 	rcu_read_unlock();
 	return tsk;
@@ -2740,7 +2780,7 @@ void cgroup_procs_write_finish(struct ta
 	/* release reference from cgroup_procs_write_start() */
 	put_task_struct(task);
 
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock();
 	for_each_subsys(ss, ssid)
 		if (ss->post_attach)
 			ss->post_attach();
@@ -2799,7 +2839,7 @@ static int cgroup_update_dfl_csses(struc
 
 	lockdep_assert_held(&cgroup_mutex);
 
-	percpu_down_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_lock();
 
 	/* look up all csses currently attached to @cgrp's subtree */
 	spin_lock_irq(&css_set_lock);
@@ -2830,7 +2870,7 @@ static int cgroup_update_dfl_csses(struc
 	ret = cgroup_migrate_execute(&mgctx);
 out_finish:
 	cgroup_migrate_finish(&mgctx);
-	percpu_up_write(&cgroup_threadgroup_rwsem);
+	cgroup_attach_unlock();
 	return ret;
 }
 
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -1528,11 +1528,7 @@ static void cpuset_attach(struct cgroup_
 	cgroup_taskset_first(tset, &css);
 	cs = css_cs(css);
 
-	/*
-	 * It should hold cpus lock because a cpu offline event can
-	 * cause set_cpus_allowed_ptr() failed.
-	 */
-	get_online_cpus();
+	lockdep_assert_cpus_held();	/* see cgroup_attach_lock() */
 	mutex_lock(&cpuset_mutex);
 
 	/* prepare for attach */
@@ -1588,7 +1584,6 @@ static void cpuset_attach(struct cgroup_
 		wake_up(&cpuset_attach_wq);
 
 	mutex_unlock(&cpuset_mutex);
-	put_online_cpus();
 }
 
 /* The various types of files and directories in a cpuset file system */


Patches currently in stable-queue which might be from caixinchen1@xxxxxxxxxx are

queue-4.19/cgroup-cpuset-change-cpuset_rwsem-and-hotplug-lock-order.patch
queue-4.19/cgroup-fix-threadgroup_rwsem-cpus_read_lock-deadlock.patch
queue-4.19/cgroup-add-missing-cpus_read_lock-to-cgroup_attach_task_all.patch
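
For anyone skimming the queue, the rule the patch enforces boils down to a fixed
lock ordering: every path that needs both "CPU hotplug disabled" and the
write-locked threadgroup rwsem must take the hotplug lock first, so ->attach()
can simply assert that hotplug is held instead of taking it. The following is a
minimal, stand-alone user-space C sketch of that ordering rule only; it is not
kernel code, and hotplug_lock, threadgroup_lock, attach_lock() and the other
names are invented for illustration. It builds with "gcc -pthread".

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for cpus_read_lock()/get_online_cpus() (outer lock) and
 * cgroup_threadgroup_rwsem (inner lock). */
static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t threadgroup_lock = PTHREAD_MUTEX_INITIALIZER;

/* Analogue of cgroup_attach_lock(): disable "hotplug" first, then take
 * the "threadgroup" lock. */
static void attach_lock(void)
{
	pthread_mutex_lock(&hotplug_lock);
	pthread_mutex_lock(&threadgroup_lock);
}

/* Analogue of cgroup_attach_unlock(): release in reverse order. */
static void attach_unlock(void)
{
	pthread_mutex_unlock(&threadgroup_lock);
	pthread_mutex_unlock(&hotplug_lock);
}

/* Analogue of cpuset_attach(): it may assume hotplug is already disabled
 * instead of taking hotplug_lock itself, which was the reversed-order
 * acquisition that produced the deadlock. */
static void attach_callback(void)
{
	printf("->attach() runs with hotplug already disabled\n");
}

/* Analogue of CPU bring-up: it takes the outer lock first too, so both
 * paths agree on the order and cannot deadlock against each other. */
static void *hotplug_thread(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&hotplug_lock);
	pthread_mutex_lock(&threadgroup_lock);	/* "create per-CPU tasks" */
	printf("hotplug creates tasks under both locks\n");
	pthread_mutex_unlock(&threadgroup_lock);
	pthread_mutex_unlock(&hotplug_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, hotplug_thread, NULL);

	attach_lock();
	attach_callback();
	attach_unlock();

	pthread_join(t, NULL);
	return 0;
}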