Hi,

On 10/29/2023 2:14 PM, Yafang Shao wrote:
> A new helper is added for cgroup1 hierarchy:
>
> - task_get_cgroup1
>   Acquires the associated cgroup of a task within a specific cgroup1
>   hierarchy. The cgroup1 hierarchy is identified by its hierarchy ID.
>
> This helper function is added to facilitate the tracing of tasks within
> a particular container or cgroup dir in BPF programs. It's important to
> note that this helper is designed specifically for cgroup1 only.
>
> Suggested-by: Tejun Heo <tj@xxxxxxxxxx>
> Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx>
> ---
>  include/linux/cgroup.h          |  4 +++-
>  kernel/cgroup/cgroup-internal.h |  1 -
>  kernel/cgroup/cgroup-v1.c       | 33 +++++++++++++++++++++++++++++++++
>  3 files changed, 36 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
> index b307013..e063e4c 100644
> --- a/include/linux/cgroup.h
> +++ b/include/linux/cgroup.h
> @@ -71,6 +71,7 @@ struct css_task_iter {
>  extern struct file_system_type cgroup_fs_type;
>  extern struct cgroup_root cgrp_dfl_root;
>  extern struct css_set init_css_set;
> +extern spinlock_t css_set_lock;
>
>  #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
>  #include <linux/cgroup_subsys.h>
> @@ -388,7 +389,6 @@ static inline void cgroup_unlock(void)
>   * as locks used during the cgroup_subsys::attach() methods.
>   */
>  #ifdef CONFIG_PROVE_RCU
> -extern spinlock_t css_set_lock;
>  #define task_css_set_check(task, __c)		\
>  	rcu_dereference_check((task)->cgroups,	\
>  		rcu_read_lock_sched_held() ||	\
> @@ -855,4 +855,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
>
>  #endif /* CONFIG_CGROUP_BPF */
>
> +struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);
> +
>  #endif /* _LINUX_CGROUP_H */
> diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
> index 5e17f01..520b90d 100644
> --- a/kernel/cgroup/cgroup-internal.h
> +++ b/kernel/cgroup/cgroup-internal.h
> @@ -164,7 +164,6 @@ struct cgroup_mgctx {
>  #define DEFINE_CGROUP_MGCTX(name)	\
>  	struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
>
> -extern spinlock_t css_set_lock;
>  extern struct cgroup_subsys *cgroup_subsys[];
>  extern struct list_head cgroup_roots;
>
> diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
> index c487ffe..f41767f 100644
> --- a/kernel/cgroup/cgroup-v1.c
> +++ b/kernel/cgroup/cgroup-v1.c
> @@ -1263,6 +1263,39 @@ int cgroup1_get_tree(struct fs_context *fc)
>  	return ret;
>  }
>
> +/**
> + * task_get_cgroup1 - Acquires the associated cgroup of a task within a
> + * specific cgroup1 hierarchy. The cgroup1 hierarchy is identified by its
> + * hierarchy ID.
> + * @tsk: The target task
> + * @hierarchy_id: The ID of a cgroup1 hierarchy
> + *
> + * On success, the cgroup is returned. On failure, ERR_PTR is returned.
> + * We limit it to cgroup1 only.
> + */
> +struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id)
> +{
> +	struct cgroup *cgrp = ERR_PTR(-ENOENT);
> +	struct cgroup_root *root;
> +
> +	rcu_read_lock();
> +	for_each_root(root) {
> +		/* cgroup1 only */
> +		if (root == &cgrp_dfl_root)
> +			continue;
> +		if (root->hierarchy_id != hierarchy_id)
> +			continue;
> +		spin_lock_irq(&css_set_lock);

Considering that this kfunc may be called from IRQ context, should we use
spin_lock_irqsave() here instead?
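Something like the below, perhaps (untested sketch, purely to illustrate the
irqsave variant; the surrounding logic is unchanged from your patch):

		unsigned long flags;

		spin_lock_irqsave(&css_set_lock, flags);
		cgrp = task_cgroup_from_root(tsk, root);
		if (!cgrp || !cgroup_tryget(cgrp))
			cgrp = ERR_PTR(-ENOENT);
		spin_unlock_irqrestore(&css_set_lock, flags);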
> +		cgrp = task_cgroup_from_root(tsk, root);
> +		if (!cgrp || !cgroup_tryget(cgrp))
> +			cgrp = ERR_PTR(-ENOENT);
> +		spin_unlock_irq(&css_set_lock);
> +		break;
> +	}
> +	rcu_read_unlock();
> +	return cgrp;
> +}
> +
>  static int __init cgroup1_wq_init(void)
>  {
>  	/*