Hi Tony,

On 11/9/23 17:09, Tony Luck wrote:
> Resctrl resources operate on subsets of CPUs in the system with the
> defining attribute of each subset being an instance of a particular
> level of cache. E.g. all CPUs sharing an L3 cache would be part of the
> same domain.
>
> In preparation for features that are scoped at the NUMA node level,
> change the code from explicit references to "cache_level" to a more
> generic scope. At this point the only options for this scope are groups
> of CPUs that share an L2 cache or L3 cache.
>
> Clean up the error handling when looking up domains. Report invalid ids
> before calling rdt_find_domain() in preparation for better messages when
> scope can be other than cache scope. This means that rdt_find_domain()
> will never return an error, so remove the error checks from the callsites.
>
> Signed-off-by: Tony Luck <tony.luck@xxxxxxxxx>
> ---
> Changes since v10:
>  Reinette: Move a blank line in domain_add_cpu()
>  Reinette: Fix warning message in domain_remove_cpu()
>
>  include/linux/resctrl.h                   |  9 +++--
>  arch/x86/kernel/cpu/resctrl/core.c        | 42 +++++++++++++++--------
>  arch/x86/kernel/cpu/resctrl/ctrlmondata.c |  2 +-
>  arch/x86/kernel/cpu/resctrl/pseudo_lock.c |  6 +++-
>  arch/x86/kernel/cpu/resctrl/rdtgroup.c    |  5 ++-
>  5 files changed, 45 insertions(+), 19 deletions(-)
>
> diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
> index 66942d7fba7f..7d4eb7df611d 100644
> --- a/include/linux/resctrl.h
> +++ b/include/linux/resctrl.h
> @@ -144,13 +144,18 @@ struct resctrl_membw {
>  struct rdt_parse_data;
>  struct resctrl_schema;
>
> +enum resctrl_scope {
> +	RESCTRL_L2_CACHE = 2,
> +	RESCTRL_L3_CACHE = 3,
> +};
> +
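One small thought on the enum: the "= 2" and "= 3" values are load-bearing,
since they match the cacheinfo cache levels. That is what lets the later
hunks pass a scope straight to get_cpu_cacheinfo_id() and compare it
against ci->info_list[i].level. A short comment here would guard against
someone renumbering them, e.g. (wording is just my suggestion):

	/*
	 * Scope values are chosen to match the cacheinfo cache level, so a
	 * cache scope can be passed directly to get_cpu_cacheinfo_id() and
	 * compared against struct cacheinfo::level.
	 */
	enum resctrl_scope {
		RESCTRL_L2_CACHE = 2,
		RESCTRL_L3_CACHE = 3,
	};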
>  /**
>   * struct rdt_resource - attributes of a resctrl resource
>   * @rid:		The index of the resource
>   * @alloc_capable:	Is allocation available on this machine
>   * @mon_capable:	Is monitor feature available on this machine
>   * @num_rmid:		Number of RMIDs available
> - * @cache_level:	Which cache level defines scope of this resource
> + * @scope:		Scope of this resource
>   * @cache:		Cache allocation related data
>   * @membw:		If the component has bandwidth controls, their properties.
>   * @domains:		All domains for this resource
> @@ -168,7 +173,7 @@ struct rdt_resource {
>  	bool			alloc_capable;
>  	bool			mon_capable;
>  	int			num_rmid;
> -	int			cache_level;
> +	enum resctrl_scope	scope;
>  	struct resctrl_cache	cache;
>  	struct resctrl_membw	membw;
>  	struct list_head	domains;
> diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
> index 19e0681f0435..9f8d87abd751 100644
> --- a/arch/x86/kernel/cpu/resctrl/core.c
> +++ b/arch/x86/kernel/cpu/resctrl/core.c
> @@ -65,7 +65,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
>  		.r_resctrl = {
>  			.rid			= RDT_RESOURCE_L3,
>  			.name			= "L3",
> -			.cache_level		= 3,
> +			.scope			= RESCTRL_L3_CACHE,
>  			.domains		= domain_init(RDT_RESOURCE_L3),
>  			.parse_ctrlval		= parse_cbm,
>  			.format_str		= "%d=%0*x",
> @@ -79,7 +79,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
>  		.r_resctrl = {
>  			.rid			= RDT_RESOURCE_L2,
>  			.name			= "L2",
> -			.cache_level		= 2,
> +			.scope			= RESCTRL_L2_CACHE,
>  			.domains		= domain_init(RDT_RESOURCE_L2),
>  			.parse_ctrlval		= parse_cbm,
>  			.format_str		= "%d=%0*x",
> @@ -93,7 +93,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
>  		.r_resctrl = {
>  			.rid			= RDT_RESOURCE_MBA,
>  			.name			= "MB",
> -			.cache_level		= 3,
> +			.scope			= RESCTRL_L3_CACHE,
>  			.domains		= domain_init(RDT_RESOURCE_MBA),
>  			.parse_ctrlval		= parse_bw,
>  			.format_str		= "%d=%*u",
> @@ -105,7 +105,7 @@ struct rdt_hw_resource rdt_resources_all[] = {
>  		.r_resctrl = {
>  			.rid			= RDT_RESOURCE_SMBA,
>  			.name			= "SMBA",
> -			.cache_level		= 3,
> +			.scope			= RESCTRL_L3_CACHE,
>  			.domains		= domain_init(RDT_RESOURCE_SMBA),
>  			.parse_ctrlval		= parse_bw,
>  			.format_str		= "%d=%*u",
> @@ -401,9 +401,6 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
>  	struct rdt_domain *d;
>  	struct list_head *l;
>
> -	if (id < 0)
> -		return ERR_PTR(-ENODEV);
> -
>  	list_for_each(l, &r->domains) {
>  		d = list_entry(l, struct rdt_domain, list);
>  		/* When id is found, return its domain. */
> @@ -491,6 +488,19 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
>  	return 0;
>  }
>
> +static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
> +{
> +	switch (scope) {
> +	case RESCTRL_L2_CACHE:
> +	case RESCTRL_L3_CACHE:
> +		return get_cpu_cacheinfo_id(cpu, scope);
> +	default:
> +		break;
> +	}
> +
> +	return -EINVAL;
> +}

This function only becomes meaningful once you introduce the node scope
(RESCTRL_NODE). Can you please move it to patch 5?
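FWIW, below is roughly how I picture the function reading once node scope
lands. This is only a sketch of my reading of the series — the RESCTRL_NODE
case and the cpu_to_node() mapping are my assumptions about patch 5, not
code from this patch:

	static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
	{
		switch (scope) {
		case RESCTRL_L2_CACHE:
		case RESCTRL_L3_CACHE:
			/* Cache scope values match cacheinfo levels. */
			return get_cpu_cacheinfo_id(cpu, scope);
		case RESCTRL_NODE:
			/* For node scope, the NUMA node id is the domain id. */
			return cpu_to_node(cpu);
		default:
			break;
		}

		return -EINVAL;
	}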
Thanks
Babu

> +
>  /*
>   * domain_add_cpu - Add a cpu to a resource's domain list.
>   *
> @@ -506,18 +516,19 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom)
>   */
>  static void domain_add_cpu(int cpu, struct rdt_resource *r)
>  {
> -	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
> +	int id = get_domain_id_from_scope(cpu, r->scope);
>  	struct list_head *add_pos = NULL;
>  	struct rdt_hw_domain *hw_dom;
>  	struct rdt_domain *d;
>  	int err;
>
> -	d = rdt_find_domain(r, id, &add_pos);
> -	if (IS_ERR(d)) {
> -		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
> +	if (id < 0) {
> +		pr_warn_once("Can't find domain id for CPU:%d scope:%d for resource %s\n",
> +			     cpu, r->scope, r->name);
>  		return;
>  	}
>
> +	d = rdt_find_domain(r, id, &add_pos);
>  	if (d) {
>  		cpumask_set_cpu(cpu, &d->cpu_mask);
>  		if (r->cache.arch_has_per_cpu_cfg)
> @@ -556,13 +567,16 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r)
>
>  static void domain_remove_cpu(int cpu, struct rdt_resource *r)
>  {
> -	int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
> +	int id = get_domain_id_from_scope(cpu, r->scope);
>  	struct rdt_hw_domain *hw_dom;
>  	struct rdt_domain *d;
>
> +	if (id < 0)
> +		return;
> +
>  	d = rdt_find_domain(r, id, NULL);
> -	if (IS_ERR_OR_NULL(d)) {
> -		pr_warn("Couldn't find cache id for CPU %d\n", cpu);
> +	if (!d) {
> +		pr_warn("Couldn't find domain with id=%d for CPU %d\n", id, cpu);
>  		return;
>  	}
>  	hw_dom = resctrl_to_arch_dom(d);
> diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> index beccb0e87ba7..3f8891d57fac 100644
> --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
> @@ -563,7 +563,7 @@ int rdtgroup_mondata_show(struct seq_file *m, void *arg)
>
>  	r = &rdt_resources_all[resid].r_resctrl;
>  	d = rdt_find_domain(r, domid, NULL);
> -	if (IS_ERR_OR_NULL(d)) {
> +	if (!d) {
>  		ret = -ENOENT;
>  		goto out;
>  	}
> diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> index 8f559eeae08e..2a682da9f43a 100644
> --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
> @@ -292,10 +292,14 @@ static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
>   */
>  static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
>  {
> +	enum resctrl_scope scope = plr->s->res->scope;
>  	struct cpu_cacheinfo *ci;
>  	int ret;
>  	int i;
>
> +	if (WARN_ON_ONCE(scope != RESCTRL_L2_CACHE && scope != RESCTRL_L3_CACHE))
> +		return -ENODEV;
> +
>  	/* Pick the first cpu we find that is associated with the cache.
>  	 */
>  	plr->cpu = cpumask_first(&plr->d->cpu_mask);
>
> @@ -311,7 +315,7 @@ static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
>  	plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm);
>
>  	for (i = 0; i < ci->num_leaves; i++) {
> -		if (ci->info_list[i].level == plr->s->res->cache_level) {
> +		if (ci->info_list[i].level == scope) {
>  			plr->line_size = ci->info_list[i].coherency_line_size;
>  			return 0;
>  		}
> diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> index 69a1de92384a..c44be64d65ec 100644
> --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
> @@ -1413,10 +1413,13 @@ unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
>  	unsigned int size = 0;
>  	int num_b, i;
>
> +	if (WARN_ON_ONCE(r->scope != RESCTRL_L2_CACHE && r->scope != RESCTRL_L3_CACHE))
> +		return size;
> +
>  	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
>  	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
>  	for (i = 0; i < ci->num_leaves; i++) {
> -		if (ci->info_list[i].level == r->cache_level) {
> +		if (ci->info_list[i].level == r->scope) {
>  			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
>  			break;
>  		}

-- 
Thanks
Babu Moger