Monitoring may be scoped at L3 cache granularity (legacy) or at the
node level (systems with Sub-NUMA Cluster enabled).

Save the struct rdt_resource pointer that was used to initialize the
monitor sections of code and use that value instead of the hard-coded
RDT_RESOURCE_L3.

No functional change.

Signed-off-by: Tony Luck <tony.luck@xxxxxxxxx>
Reviewed-by: Peter Newman <peternewman@xxxxxxxxxx>
---
 arch/x86/kernel/cpu/resctrl/monitor.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index ded1fc7cb7cb..9be6ffdd01ae 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -30,6 +30,8 @@ struct rmid_entry {
 	struct list_head	list;
 };
 
+static struct rdt_resource *mon_resource;
+
 /**
  * @rmid_free_lru	A least recently used list of free RMIDs
  *	These RMIDs are guaranteed to have an occupancy less than the
@@ -268,7 +270,7 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
  */
 void __check_limbo(struct rdt_domain *d, bool force_free)
 {
-	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+	struct rdt_resource *r = mon_resource;
 	struct rmid_entry *entry;
 	u32 crmid = 1, nrmid;
 	bool rmid_dirty;
@@ -333,7 +335,7 @@ int alloc_rmid(void)
 
 static void add_rmid_to_limbo(struct rmid_entry *entry)
 {
-	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+	struct rdt_resource *r = mon_resource;
 	struct rdt_domain *d;
 	int cpu, err;
 	u64 val = 0;
@@ -645,7 +647,7 @@ void cqm_handle_limbo(struct work_struct *work)
 
 	mutex_lock(&rdtgroup_mutex);
 
-	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+	r = mon_resource;
 	d = container_of(work, struct rdt_domain, cqm_limbo.work);
 
 	__check_limbo(d, false);
@@ -681,7 +683,7 @@ void mbm_handle_overflow(struct work_struct *work)
 	if (!static_branch_likely(&rdt_mon_enable_key))
 		goto out_unlock;
 
-	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
+	r = mon_resource;
 	d = container_of(work, struct rdt_domain, mbm_over.work);
 
 	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
@@ -759,9 +761,9 @@ static struct mon_evt mbm_local_event = {
 /*
  * Initialize the event list for the resource.
  *
- * Note that MBM events are also part of RDT_RESOURCE_L3 resource
- * because as per the SDM the total and local memory bandwidth
- * are enumerated as part of L3 monitoring.
+ * Monitor events can either be part of RDT_RESOURCE_L3 resource,
+ * or they may be per NUMA node on systems with sub-NUMA cluster
+ * enabled and are then in the RDT_RESOURCE_NODE resource.
  */
 static void l3_mon_evt_init(struct rdt_resource *r)
 {
@@ -773,6 +775,8 @@ static void l3_mon_evt_init(struct rdt_resource *r)
 		list_add_tail(&mbm_total_event.list, &r->evt_list);
 	if (is_mbm_local_enabled())
 		list_add_tail(&mbm_local_event.list, &r->evt_list);
+
+	mon_resource = r;
 }
 
 int __init rdt_get_mon_l3_config(struct rdt_resource *r)
--
2.40.1
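
A minimal sketch (not part of this patch) of how the saved pointer is
expected to be set up: whichever resource is handed to l3_mon_evt_init()
becomes mon_resource, so on an SNC-enabled system the caller would pass
the node-scoped resource instead of the L3 one. RDT_RESOURCE_NODE is taken
from the comment added above; the snc_nodes_per_l3_cache check and the
pick_mon_resource() helper are hypothetical names used only for
illustration of how other patches in the series might wire this up.

	/*
	 * Sketch only: choose which resource owns the monitor events.
	 * snc_nodes_per_l3_cache and pick_mon_resource() are hypothetical;
	 * RDT_RESOURCE_NODE comes from the comment added in this patch.
	 */
	static void pick_mon_resource(void)
	{
		struct rdt_resource *r;

		if (snc_nodes_per_l3_cache > 1)
			r = &rdt_resources_all[RDT_RESOURCE_NODE].r_resctrl;
		else
			r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;

		l3_mon_evt_init(r);	/* saves r in mon_resource */
	}

With that in place, __check_limbo(), add_rmid_to_limbo() and the limbo and
overflow workers all read mon_resource rather than hard-coding
RDT_RESOURCE_L3, which is exactly the substitution the diff above makes.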