The mbm_cntr_assign mode offers several counters that can be assigned to an RMID, event pair and monitor the bandwidth as long as it is assigned. Counters are managed at the domain level. Introduce the interface to allocate/free/assign the counters. If the user requests assignments across all domains, some domains may fail if they run out of counters. Ensure assignments continue in the other domains wherever possible. Signed-off-by: Babu Moger <babu.moger@xxxxxxx> --- v10: Patch changed completely. Counters are managed at the domain level based on the discussion. https://lore.kernel.org/lkml/CALPaoCj+zWq1vkHVbXYP0znJbe6Ke3PXPWjtri5AFgD9cQDCUg@xxxxxxxxxxxxxx/ Reset non-architectural MBM state. Commit message update. v9: Introduced new function resctrl_config_cntr to assign the counter, update the bitmap and reset the architectural state. Taken care of error handling (freeing the counter) when assignment fails. Moved mbm_cntr_assigned_to_domain here as it is used in this patch. Minor text changes. v8: Renamed rdtgroup_assign_cntr() to rdtgroup_assign_cntr_event(). Added the code to return the error if rdtgroup_assign_cntr_event() fails. Moved the definition of MBM_EVENT_ARRAY_INDEX to resctrl/internal.h. Fixed a typo in the comments. v7: New patch. Moved all the FS code here. Merged rdtgroup_assign_cntr and rdtgroup_alloc_cntr. Added new #define MBM_EVENT_ARRAY_INDEX.
--- arch/x86/kernel/cpu/resctrl/internal.h | 5 +- arch/x86/kernel/cpu/resctrl/monitor.c | 4 +- arch/x86/kernel/cpu/resctrl/rdtgroup.c | 110 +++++++++++++++++++++++++ 3 files changed, 116 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 849bcfe4ea5b..70d2577fc377 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -704,5 +704,8 @@ unsigned int mon_event_config_index_get(u32 evtid); int resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d, enum resctrl_event_id evtid, u32 rmid, u32 closid, u32 cntr_id, bool assign); - +int rdtgroup_assign_cntr_event(struct rdt_resource *r, struct rdtgroup *rdtgrp, + struct rdt_mon_domain *d, enum resctrl_event_id evtid); +struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid, + u32 rmid, enum resctrl_event_id evtid); #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index f857af361af1..8823cd97ff1f 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -575,8 +575,8 @@ void free_rmid(u32 closid, u32 rmid) list_add_tail(&entry->list, &rmid_free_lru); } -static struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid, - u32 rmid, enum resctrl_event_id evtid) +struct mbm_state *get_mbm_state(struct rdt_mon_domain *d, u32 closid, + u32 rmid, enum resctrl_event_id evtid) { u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index e895d2415f22..1c8694a68cf4 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -1927,6 +1927,116 @@ int resctrl_arch_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d, return 0; } +/* + * Configure the counter for the event, RMID pair for the domain. 
+ */ +static int resctrl_config_cntr(struct rdt_resource *r, struct rdt_mon_domain *d, + enum resctrl_event_id evtid, u32 rmid, u32 closid, + u32 cntr_id, bool assign) +{ + struct mbm_state *m; + int ret; + + ret = resctrl_arch_config_cntr(r, d, evtid, rmid, closid, cntr_id, assign); + if (ret) + return ret; + + m = get_mbm_state(d, closid, rmid, evtid); + if (m) + memset(m, 0, sizeof(struct mbm_state)); + + return ret; +} + +static bool mbm_cntr_assigned(struct rdt_resource *r, struct rdt_mon_domain *d, + struct rdtgroup *rdtgrp, enum resctrl_event_id evtid) +{ + int cntr_id; + + for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) { + if (d->cntr_cfg[cntr_id].rdtgrp == rdtgrp && + d->cntr_cfg[cntr_id].evtid == evtid) + return true; + } + + return false; +} + +static int mbm_cntr_alloc(struct rdt_resource *r, struct rdt_mon_domain *d, + struct rdtgroup *rdtgrp, enum resctrl_event_id evtid) +{ + int cntr_id; + + for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) { + if (!d->cntr_cfg[cntr_id].rdtgrp) { + d->cntr_cfg[cntr_id].rdtgrp = rdtgrp; + d->cntr_cfg[cntr_id].evtid = evtid; + return cntr_id; + } + } + + return -EINVAL; +} + +static void mbm_cntr_free(struct rdt_resource *r, struct rdt_mon_domain *d, + struct rdtgroup *rdtgrp, enum resctrl_event_id evtid) +{ + int cntr_id; + + for (cntr_id = 0; cntr_id < r->mon.num_mbm_cntrs; cntr_id++) { + if (d->cntr_cfg[cntr_id].rdtgrp == rdtgrp && + d->cntr_cfg[cntr_id].evtid == evtid) + memset(&d->cntr_cfg[cntr_id], 0, sizeof(struct mbm_cntr_cfg)); + } +} + +/* + * Assign a hardware counter to event @evtid of group @rdtgrp. + * Counter will be assigned to all the domains if rdt_mon_domain is NULL + * else the counter will be assigned to specific domain. 
+ */ +int rdtgroup_assign_cntr_event(struct rdt_resource *r, struct rdtgroup *rdtgrp, + struct rdt_mon_domain *d, enum resctrl_event_id evtid) +{ + int cntr_id, ret = 0; + + if (!d) { + list_for_each_entry(d, &r->mon_domains, hdr.list) { + if (mbm_cntr_assigned(r, d, rdtgrp, evtid)) + continue; + + cntr_id = mbm_cntr_alloc(r, d, rdtgrp, evtid); + if (cntr_id < 0) { + rdt_last_cmd_puts("Domain Out of MBM assignable counters\n"); + continue; + } + + ret = resctrl_config_cntr(r, d, evtid, rdtgrp->mon.rmid, + rdtgrp->closid, cntr_id, true); + if (ret) + goto out_done_assign; + } + } else { + if (mbm_cntr_assigned(r, d, rdtgrp, evtid)) + goto out_done_assign; + + cntr_id = mbm_cntr_alloc(r, d, rdtgrp, evtid); + if (cntr_id < 0) { + rdt_last_cmd_puts("Domain Out of MBM assignable counters\n"); + goto out_done_assign; + } + + ret = resctrl_config_cntr(r, d, evtid, rdtgrp->mon.rmid, + rdtgrp->closid, cntr_id, true); + } + +out_done_assign: + if (ret) + mbm_cntr_free(r, d, rdtgrp, evtid); + + return ret; +} + /* rdtgroup information files for one cache resource. */ static struct rftype res_common_files[] = { { -- 2.34.1