Hi Babu,

On 10/9/24 10:39 AM, Babu Moger wrote:
> Assign/unassign counters on resctrl group creation/deletion. Two counters
> are required per group, one for MBM total event and one for MBM local
> event.
>
> There are a limited number of counters available for assignment. If these
> counters are exhausted, the kernel will display the error message: "Out of
> MBM assignable counters". However, it is not necessary to fail the
> creation of a group due to assignment failures. Users have the flexibility
> to modify the assignments at a later time.
>
> Signed-off-by: Babu Moger <babu.moger@xxxxxxx>
> ---

...

> ---
>  arch/x86/kernel/cpu/resctrl/rdtgroup.c | 64 ++++++++++++++++++++++++++
>  1 file changed, 64 insertions(+)
>
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 791258adcbda..cb2c60c0319e 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c

...

>  static int rdt_get_tree(struct fs_context *fc)
>  {
>  	struct rdt_fs_context *ctx = rdt_fc2context(fc);
> @@ -2934,6 +2980,8 @@ static int rdt_get_tree(struct fs_context *fc)
>  		if (ret < 0)
>  			goto out_mongrp;
>  		rdtgroup_default.mon.mon_data_kn = kn_mondata;
> +
> +		rdtgroup_assign_cntrs(&rdtgroup_default);
>  	}
>
>  	ret = rdt_pseudo_lock_init();
> @@ -2964,6 +3012,7 @@ static int rdt_get_tree(struct fs_context *fc)
>  out_psl:
>  	rdt_pseudo_lock_release();
>  out_mondata:
> +	rdtgroup_unassign_cntrs(&rdtgroup_default);
>  	if (resctrl_arch_mon_capable())
>  		kernfs_remove(kn_mondata);

I think I mentioned this before ... this addition belongs within the
"if (resctrl_arch_mon_capable())" to be symmetrical with where it was
called from. (See the sketch after the last quoted hunk below.)

>  out_mongrp:
> @@ -3144,6 +3193,7 @@ static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
>
>  	head = &rdtgrp->mon.crdtgrp_list;
>  	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
> +		rdtgroup_unassign_cntrs(sentry);
>  		free_rmid(sentry->closid, sentry->mon.rmid);
>  		list_del(&sentry->mon.crdtgrp_list);
>
> @@ -3184,6 +3234,8 @@ static void rmdir_all_sub(void)
>  		cpumask_or(&rdtgroup_default.cpu_mask,
>  			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
>
> +		rdtgroup_unassign_cntrs(rdtgrp);
> +
>  		free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
>
>  		kernfs_remove(rdtgrp->kn);
> @@ -3223,6 +3275,8 @@ static void rdt_kill_sb(struct super_block *sb)
>  		resctrl_arch_disable_alloc();
>  	if (resctrl_arch_mon_capable())
>  		resctrl_arch_disable_mon();
> +
> +	rdtgroup_unassign_cntrs(&rdtgroup_default);

Unassigning counters after monitoring is completely disabled seems late.
I think this can be moved earlier to be right after the counters of all
the other groups are unassigned.
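That is, assuming the other groups' counters keep being unassigned from
rmdir_all_sub(), something like this (untested, just to illustrate the
placement I have in mind):

	rmdir_all_sub();
	rdtgroup_unassign_cntrs(&rdtgroup_default);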
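Similarly, for my out_mondata comment earlier, the shape I am thinking
of is (again untested, and whether the unassign goes before or after
kernfs_remove() is up to you):

out_mondata:
	if (resctrl_arch_mon_capable()) {
		rdtgroup_unassign_cntrs(&rdtgroup_default);
		kernfs_remove(kn_mondata);
	}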
>  	resctrl_mounted = false;
>  	kernfs_kill_sb(sb);
>  	mutex_unlock(&rdtgroup_mutex);
> @@ -3814,6 +3868,8 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
>  		goto out_unlock;
>  	}
>
> +	rdtgroup_assign_cntrs(rdtgrp);
> +
>  	kernfs_activate(rdtgrp->kn);
>
>  	/*
> @@ -3858,6 +3914,8 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
>  	if (ret)
>  		goto out_closid_free;
>
> +	rdtgroup_assign_cntrs(rdtgrp);
> +
>  	kernfs_activate(rdtgrp->kn);
>
>  	ret = rdtgroup_init_alloc(rdtgrp);
> @@ -3883,6 +3941,7 @@
>  out_del_list:
>  	list_del(&rdtgrp->rdtgroup_list);
>  out_rmid_free:
> +	rdtgroup_unassign_cntrs(rdtgrp);
>  	mkdir_rdt_prepare_rmid_free(rdtgrp);
>  out_closid_free:
>  	closid_free(closid);
> @@ -3953,6 +4012,9 @@ static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
>  	update_closid_rmid(tmpmask, NULL);
>
>  	rdtgrp->flags = RDT_DELETED;
> +
> +	rdtgroup_unassign_cntrs(rdtgrp);
> +
>  	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
>
>  	/*
> @@ -3999,6 +4061,8 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
>  	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
>  	update_closid_rmid(tmpmask, NULL);
>
> +	rdtgroup_unassign_cntrs(rdtgrp);
> +
>  	free_rmid(rdtgrp->closid, rdtgrp->mon.rmid);
>  	closid_free(rdtgrp->closid);
>

Reinette