On Tue, Apr 02, 2024 at 11:30:11AM -0400, Waiman Long <longman@xxxxxxxxxx> wrote:
> Yes, there is a potential that a cpus_read_lock() may be called leading to
> deadlock. So unless we reverse the current cgroup_mutex --> cpu_hotplug_lock
> ordering, it is not safe to call cgroup_transfer_tasks() directly.

I see that cgroup_transfer_tasks() has only a single user -- cpuset.
What about bending it to that specific use, like this:

diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 34aaf0e87def..64deb7212c5c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -109,7 +109,7 @@ struct cgroup *cgroup_get_from_fd(int fd);
 struct cgroup *cgroup_v1v2_get_from_fd(int fd);
 
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
-int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
+int cgroup_transfer_tasks_locked(struct cgroup *to, struct cgroup *from);
 
 int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 520a11cb12f4..f97025858c7a 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -91,7 +91,8 @@ EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
  *
  * Return: %0 on success or a negative errno code on failure
  */
-int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
+int cgroup_transfer_tasks_locked(struct cgroup *to, struct cgroup *from)
 {
 	DEFINE_CGROUP_MGCTX(mgctx);
 	struct cgrp_cset_link *link;
@@ -106,9 +106,11 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 	if (ret)
 		return ret;
 
-	cgroup_lock();
-
-	cgroup_attach_lock(true);
+	/* The locking rules serve the specific purpose of v1 cpuset hotplug
+	 * migration, see hotplug_update_tasks_legacy() and
+	 * cgroup_attach_lock(). */
+	lockdep_assert_held(&cgroup_mutex);
+	lockdep_assert_cpus_held();
+	percpu_down_write(&cgroup_threadgroup_rwsem);
 
 	/* all tasks in @from are being moved, all csets are source */
 	spin_lock_irq(&css_set_lock);
@@ -144,8 +146,7 @@
 	} while (task && !ret);
 out_err:
 	cgroup_migrate_finish(&mgctx);
-	cgroup_attach_unlock(true);
-	cgroup_unlock();
+	percpu_up_write(&cgroup_threadgroup_rwsem);
 	return ret;
 }
 
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 13d27b17c889..94fb8b26f038 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -4331,7 +4331,7 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 	    nodes_empty(parent->mems_allowed))
 		parent = parent_cs(parent);
 
-	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
+	if (cgroup_transfer_tasks_locked(parent->css.cgroup, cs->css.cgroup)) {
 		pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
 		pr_cont_cgroup_name(cs->css.cgroup);
 		pr_cont("\n");
@@ -4376,21 +4376,8 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
 
 	/*
 	 * Move tasks to the nearest ancestor with execution resources,
-	 * This is full cgroup operation which will also call back into
-	 * cpuset. Execute it asynchronously using workqueue.
 	 */
-	if (is_empty && css_tryget_online(&cs->css)) {
-		struct cpuset_remove_tasks_struct *s;
-
-		s = kzalloc(sizeof(*s), GFP_KERNEL);
-		if (WARN_ON_ONCE(!s)) {
-			css_put(&cs->css);
-			return;
-		}
-
-		s->cs = cs;
-		INIT_WORK(&s->work, cpuset_migrate_tasks_workfn);
-		schedule_work(&s->work);
-	}
+	if (is_empty)
+		remove_tasks_in_empty_cpuset(cs);
 }
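
FWIW, the caller-side context I have in mind for the two asserts is
roughly the following (a sketch of my assumptions only, untested --
where exactly the hotplug path ends up taking cgroup_mutex is not
shown in the diff above, and cs/new_cpus/new_mems etc. are assumed to
be in scope as in cpuset.c):

	/*
	 * Assumed calling context (sketch): the v1 cpuset hotplug
	 * path reaches remove_tasks_in_empty_cpuset() with both locks
	 * below already held, so the _locked variant only takes
	 * cgroup_threadgroup_rwsem itself.
	 */
	cpus_read_lock();	/* satisfies lockdep_assert_cpus_held() */
	cgroup_lock();		/* satisfies lockdep_assert_held(&cgroup_mutex) */

	hotplug_update_tasks_legacy(cs, new_cpus, new_mems,
				    cpus_updated, mems_updated);
	/*
	 *   -> remove_tasks_in_empty_cpuset(cs)
	 *      -> cgroup_transfer_tasks_locked(parent, cgroup)
	 *         which only does
	 *         percpu_down_write(&cgroup_threadgroup_rwsem)
	 *         internally.
	 */

	cgroup_unlock();
	cpus_read_unlock();

IOW the asserts merely document what the caller must already hold; the
function itself no longer takes cgroup_mutex or cpu_hotplug_lock.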