On Thu, Mar 07, 2024 at 02:39:08PM -0800, Reinette Chatre wrote:
> Thank you for the example. I find that significantly easier to
> understand than a single number in a generic "nodes_per_l3_cache".
> Especially with potential confusion surrounding inconsistent "nodes"
> between allocation and monitoring.
>
> How about domain_cpu_list and domain_cpu_map ?

Reinette,

Like this (my test system doesn't have SNC, so all domains are the same):

$ cd /sys/fs/resctrl/info/
$ grep . */domain*
L3/domain_cpu_list:0: 0-35,72-107
L3/domain_cpu_list:1: 36-71,108-143
L3/domain_cpu_map:0: 0000,00000fff,ffffff00,0000000f,ffffffff
L3/domain_cpu_map:1: ffff,fffff000,000000ff,fffffff0,00000000
L3_MON/domain_cpu_list:0: 0-35,72-107
L3_MON/domain_cpu_list:1: 36-71,108-143
L3_MON/domain_cpu_map:0: 0000,00000fff,ffffff00,0000000f,ffffffff
L3_MON/domain_cpu_map:1: ffff,fffff000,000000ff,fffffff0,00000000
MB/domain_cpu_list:0: 0-35,72-107
MB/domain_cpu_list:1: 36-71,108-143
MB/domain_cpu_map:0: 0000,00000fff,ffffff00,0000000f,ffffffff
MB/domain_cpu_map:1: ffff,fffff000,000000ff,fffffff0,00000000

The patch to do this is pretty straightforward.

-Tony

---

diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index ae80170a0d1b..c180b80640e3 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -957,6 +957,20 @@ static int rdt_num_closids_show(struct kernfs_open_file *of,
 	return 0;
 }
 
+static int rdt_ctrl_cpus_show(struct kernfs_open_file *of,
+			      struct seq_file *seq, void *v)
+{
+	struct resctrl_schema *s = of->kn->parent->priv;
+	struct rdt_resource *r = s->res;
+	struct rdt_ctrl_domain *d;
+
+	list_for_each_entry(d, &r->ctrl_domains, hdr.list)
+		seq_printf(seq, is_cpu_list(of) ? "%d: %*pbl\n" : "%d: %*pb\n",
+			   d->hdr.id, cpumask_pr_args(&d->hdr.cpu_mask));
+
+	return 0;
+}
+
 static int rdt_default_ctrl_show(struct kernfs_open_file *of,
 			    struct seq_file *seq, void *v)
 {
@@ -1103,6 +1117,19 @@ static int rdt_num_rmids_show(struct kernfs_open_file *of,
 	return 0;
 }
 
+static int rdt_mon_cpus_show(struct kernfs_open_file *of,
+			     struct seq_file *seq, void *v)
+{
+	struct rdt_resource *r = of->kn->parent->priv;
+	struct rdt_mon_domain *d;
+
+	list_for_each_entry(d, &r->mon_domains, hdr.list)
+		seq_printf(seq, is_cpu_list(of) ? "%d: %*pbl\n" : "%d: %*pb\n",
+			   d->hdr.id, cpumask_pr_args(&d->hdr.cpu_mask));
+
+	return 0;
+}
+
 static int rdt_mon_features_show(struct kernfs_open_file *of,
 				 struct seq_file *seq, void *v)
 {
@@ -1810,6 +1837,21 @@ static struct rftype res_common_files[] = {
 		.seq_show	= rdt_num_closids_show,
 		.fflags		= RFTYPE_CTRL_INFO,
 	},
+	{
+		.name		= "domain_cpu_list",
+		.mode		= 0444,
+		.kf_ops		= &rdtgroup_kf_single_ops,
+		.seq_show	= rdt_ctrl_cpus_show,
+		.flags		= RFTYPE_FLAGS_CPUS_LIST,
+		.fflags		= RFTYPE_CTRL_INFO,
+	},
+	{
+		.name		= "domain_cpu_map",
+		.mode		= 0444,
+		.kf_ops		= &rdtgroup_kf_single_ops,
+		.seq_show	= rdt_ctrl_cpus_show,
+		.fflags		= RFTYPE_CTRL_INFO,
+	},
 	{
 		.name		= "mon_features",
 		.mode		= 0444,
@@ -1824,6 +1866,21 @@ static struct rftype res_common_files[] = {
 		.seq_show	= rdt_num_rmids_show,
 		.fflags		= RFTYPE_MON_INFO,
 	},
+	{
+		.name		= "domain_cpu_list",
+		.mode		= 0444,
+		.kf_ops		= &rdtgroup_kf_single_ops,
+		.seq_show	= rdt_mon_cpus_show,
+		.flags		= RFTYPE_FLAGS_CPUS_LIST,
+		.fflags		= RFTYPE_MON_INFO,
+	},
+	{
+		.name		= "domain_cpu_map",
+		.mode		= 0444,
+		.kf_ops		= &rdtgroup_kf_single_ops,
+		.seq_show	= rdt_mon_cpus_show,
+		.fflags		= RFTYPE_MON_INFO,
+	},
 	{
 		.name		= "cbm_mask",
 		.mode		= 0444,
-- 
2.43.0
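
P.S. As a rough, untested sketch of how userspace tooling might consume
these files (not part of the patch; the script name and argument
handling are illustrative), something like this could map a CPU number
to the L3_MON domain id containing it, by parsing the "id: cpulist"
lines shown in the grep output above:

#!/bin/sh
# which_domain.sh: print the L3_MON domain id whose cpulist
# contains the given CPU number. Untested sketch; assumes the
# "id: cpulist" file format shown in the example output above.
cpu=$1
while IFS=': ' read -r id cpus; do
	# cpulist is comma-separated ranges, e.g. "0-35,72-107";
	# a single CPU entry like "42" expands to lo=hi=42 below.
	for range in $(printf '%s\n' "$cpus" | tr ',' ' '); do
		lo=${range%%-*}
		hi=${range##*-}
		if [ "$cpu" -ge "$lo" ] && [ "$cpu" -le "$hi" ]; then
			echo "$id"
			exit 0
		fi
	done
done < /sys/fs/resctrl/info/L3_MON/domain_cpu_list
exit 1

Since every resource emits the same format, the same loop would work
for L3/domain_cpu_list or MB/domain_cpu_list as well.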