The mbm_cntr_assign mode offers several hardware counters that can be assigned to an RMID-event pair and monitor the bandwidth as long as it is assigned. Counters are managed at two levels. The global assignment is tracked using the mbm_cntr_free_map field in the struct resctrl_mon, while domain-specific assignments are tracked using the mbm_cntr_map field in the struct rdt_mon_domain. Allocation begins at the global level and is then applied individually to each domain. Introduce an interface to allocate these counters and update the corresponding domains accordingly. Signed-off-by: Babu Moger <babu.moger@xxxxxxx> --- v7: New patch. Moved all the FS code here. Merged rdtgroup_assign_cntr and rdtgroup_alloc_cntr. Added new #define MBM_EVENT_ARRAY_INDEX. --- arch/x86/kernel/cpu/resctrl/internal.h | 2 ++ arch/x86/kernel/cpu/resctrl/rdtgroup.c | 46 ++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 57c31615eae7..6a90fc20be5b 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -705,6 +705,8 @@ unsigned int mon_event_config_index_get(u32 evtid); int resctrl_arch_assign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d, enum resctrl_event_id evtid, u32 rmid, u32 closid, u32 cntr_id, bool assign); +int rdtgroup_assign_cntr(struct rdt_resource *r, struct rdtgroup *rdtgrp, + struct rdt_mon_domain *d, enum resctrl_event_id evtid); void rdt_staged_configs_clear(void); bool closid_allocated(unsigned int closid); int resctrl_find_cleanest_closid(void); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 7ad653b4e768..1d45120ff2b5 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -864,6 +864,13 @@ static int rdtgroup_rmid_show(struct kernfs_open_file *of, return ret; } +/* + * Get the counter index for the assignable counter + 
* 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID + * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID + */ +#define MBM_EVENT_ARRAY_INDEX(_event) ((_event) - QOS_L3_MBM_TOTAL_EVENT_ID) + static int rdtgroup_mbm_assign_mode_show(struct kernfs_open_file *of, struct seq_file *s, void *v) { @@ -1898,6 +1905,45 @@ int resctrl_arch_assign_cntr(struct rdt_resource *r, struct rdt_mon_domain *d, return 0; } +/* + * Assign a hardware counter to the group. + * Counter will be assigned to all the domains if rdt_mon_domain is NULL + * else the counter will be assigned to the specific domain. + */ +int rdtgroup_assign_cntr(struct rdt_resource *r, struct rdtgroup *rdtgrp, + struct rdt_mon_domain *d, enum resctrl_event_id evtid) +{ + int index = MBM_EVENT_ARRAY_INDEX(evtid); + int cntr_id = rdtgrp->mon.cntr_id[index]; + + /* + * Allocate a new counter id to the group if the counter id is not + * assigned already. + */ + if (cntr_id == MON_CNTR_UNSET) { + cntr_id = mbm_cntr_alloc(r); + if (cntr_id < 0) { + rdt_last_cmd_puts("Out of MBM assignable counters\n"); + return -ENOSPC; + } + rdtgrp->mon.cntr_id[index] = cntr_id; + } + + if (!d) { + list_for_each_entry(d, &r->mon_domains, hdr.list) { + resctrl_arch_assign_cntr(r, d, evtid, rdtgrp->mon.rmid, + rdtgrp->closid, cntr_id, true); + set_bit(cntr_id, d->mbm_cntr_map); + } + } else { + resctrl_arch_assign_cntr(r, d, evtid, rdtgrp->mon.rmid, + rdtgrp->closid, cntr_id, true); + set_bit(cntr_id, d->mbm_cntr_map); + } + + return 0; +} + /* rdtgroup information files for one cache resource. */ static struct rftype res_common_files[] = { { -- 2.34.1