With support for the ABMC (Assignable Bandwidth Monitoring Counters) feature,
the user has the option to assign or unassign an RMID to a hardware counter
and monitor the bandwidth for a longer duration. Provide the interface to
assign a counter to the group.

The ABMC feature implements a pair of MSRs, L3_QOS_ABMC_CFG (MSR C000_03FDh)
and L3_QOS_ABMC_DSC (MSR C000_03FEh). Each logical processor implements a
separate copy of these registers. Attempts to read or write these MSRs when
ABMC is not enabled will result in a #GP(0) exception.

Individual assignable bandwidth counters are configured by writing to the
L3_QOS_ABMC_CFG MSR, specifying the Counter ID, Bandwidth Source, and
Bandwidth Types. Reading L3_QOS_ABMC_DSC returns the configuration of the
counter specified by L3_QOS_ABMC_CFG[CtrID].

The feature details are documented in the APM listed below [1].

[1] AMD64 Architecture Programmer's Manual Volume 2: System Programming,
    Publication # 24593, Revision 3.41, section 19.3.3.3 Assignable
    Bandwidth Monitoring (ABMC).

Signed-off-by: Babu Moger <babu.moger@xxxxxxx>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=206537
---
v3: Removed the static from the prototype of rdtgroup_assign_abmc. The
    function is not called directly from the user anymore. These changes
    are related to the global assignment interface.

v2: Minor text changes in the commit message.
---
 arch/x86/kernel/cpu/resctrl/internal.h |  1 +
 arch/x86/kernel/cpu/resctrl/rdtgroup.c | 86 ++++++++++++++++++++++++++
 2 files changed, 87 insertions(+)

diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 88453c86474b..9d84c80104f9 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -651,6 +651,7 @@ void rdt_domain_reconfigure_cdp(struct rdt_resource *r);
 void __init resctrl_file_fflags_init(const char *config, unsigned long fflags);
 void arch_domain_mbm_evt_config(struct rdt_hw_domain *hw_dom);
+ssize_t rdtgroup_assign_abmc(struct rdtgroup *rdtgrp, u32 evtid, int mon_state);
 void rdt_staged_configs_clear(void);
 bool closid_allocated(unsigned int closid);
 int resctrl_find_cleanest_closid(void);
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 7f54788a58de..cfbdaf8b5f83 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -202,6 +202,18 @@ static void assign_cntrs_init(void)
 	assign_cntrs_free_map_len = r->mbm_assign_cntrs;
 }
 
+static int assign_cntrs_alloc(void)
+{
+	u32 counterid = ffs(assign_cntrs_free_map);
+
+	if (counterid == 0)
+		return -ENOSPC;
+	counterid--;
+	assign_cntrs_free_map &= ~(1 << counterid);
+
+	return counterid;
+}
+
 /**
  * rdtgroup_mode_by_closid - Return mode of resource group with closid
  * @closid: closid if the resource group
@@ -1848,6 +1860,80 @@ static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
 	return ret ?: nbytes;
 }
 
+static void rdtgroup_abmc_msrwrite(void *info)
+{
+	u64 *msrval = info;
+
+	wrmsrl(MSR_IA32_L3_QOS_ABMC_CFG, *msrval);
+}
+
+static void rdtgroup_abmc_domain(struct rdt_domain *d, struct rdtgroup *rdtgrp,
+				 u32 evtid, int index, bool assign)
+{
+	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
+	union l3_qos_abmc_cfg abmc_cfg = { 0 };
+	struct arch_mbm_state *arch_mbm;
+
+	abmc_cfg.split.cfg_en = 1;
+	abmc_cfg.split.ctr_en = assign ? 1 : 0;
+	abmc_cfg.split.ctr_id = rdtgrp->mon.abmc_ctr_id[index];
+	abmc_cfg.split.bw_src = rdtgrp->mon.rmid;
+
+	/*
+	 * Read the event configuration from the domain and pass it as
+	 * bw_type.
+	 */
+	if (evtid == QOS_L3_MBM_TOTAL_EVENT_ID) {
+		abmc_cfg.split.bw_type = hw_dom->mbm_total_cfg;
+		arch_mbm = &hw_dom->arch_mbm_total[rdtgrp->mon.rmid];
+	} else {
+		abmc_cfg.split.bw_type = hw_dom->mbm_local_cfg;
+		arch_mbm = &hw_dom->arch_mbm_local[rdtgrp->mon.rmid];
+	}
+
+	smp_call_function_any(&d->cpu_mask, rdtgroup_abmc_msrwrite, &abmc_cfg, 1);
+
+	/* Reset the internal counters */
+	if (arch_mbm)
+		memset(arch_mbm, 0, sizeof(struct arch_mbm_state));
+}
+
+ssize_t rdtgroup_assign_abmc(struct rdtgroup *rdtgrp, u32 evtid, int mon_state)
+{
+	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+	int counterid = 0, index;
+	struct rdt_domain *d;
+
+	if (rdtgrp->mon.mon_state & mon_state) {
+		rdt_last_cmd_puts("ABMC counter is assigned already\n");
+		return 0;
+	}
+
+	index = mon_event_config_index_get(evtid);
+	if (index == INVALID_CONFIG_INDEX) {
+		pr_warn_once("Invalid event id %d\n", evtid);
+		return -EINVAL;
+	}
+
+	/*
+	 * Allocate a new counter and update domains
+	 */
+	counterid = assign_cntrs_alloc();
+	if (counterid < 0) {
+		rdt_last_cmd_puts("Out of ABMC counters\n");
+		return -ENOSPC;
+	}
+
+	rdtgrp->mon.abmc_ctr_id[index] = counterid;
+
+	list_for_each_entry(d, &r->domains, list)
+		rdtgroup_abmc_domain(d, rdtgrp, evtid, index, 1);
+
+	rdtgrp->mon.mon_state |= mon_state;
+
+	return 0;
+}
+
 /* rdtgroup information files for one cache resource. */
 static struct rftype res_common_files[] = {
 	{
-- 
2.34.1
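
[Not part of the patch] For readers unfamiliar with the free-map scheme used by
assign_cntrs_alloc() above: free counter IDs are tracked as set bits in a bitmap
and the lowest free ID is handed out via ffs(). A minimal, self-contained
userspace sketch of the same technique follows; the names cntr_alloc()/cntr_free()
and the fixed pool of 32 IDs are illustrative only and do not appear in the patch.

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int free_map = 0xffffffffu;	/* all 32 counter IDs free */

/* Hand out the lowest free counter ID, or -1 when none are left. */
static int cntr_alloc(void)
{
	int id = ffs((int)free_map);

	if (id == 0)
		return -1;
	id--;				/* ffs() is 1-based */
	free_map &= ~(1u << id);	/* mark the ID as in use */

	return id;
}

/* Return a counter ID to the free pool. */
static void cntr_free(int id)
{
	free_map |= 1u << id;
}

int main(void)
{
	int a = cntr_alloc();	/* 0 */
	int b = cntr_alloc();	/* 1 */

	cntr_free(a);
	printf("%d %d %d\n", a, b, cntr_alloc());	/* prints: 0 1 0 */
	return 0;
}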