The QoS slow memory configuration details are available via
CPUID_Fn80000020_EDX_x02. Detect the available details and initialize
the rest to defaults.

Signed-off-by: Babu Moger <babu.moger@xxxxxxx>
---
Note for reviewers: a small illustrative user-space CPUID probe sketch
is appended after the diff.

 arch/x86/kernel/cpu/resctrl/core.c        | 29 +++++++++++++++++++++++++++--
 arch/x86/kernel/cpu/resctrl/ctrlmondata.c |  2 +-
 arch/x86/kernel/cpu/resctrl/internal.h    |  1 +
 arch/x86/kernel/cpu/resctrl/rdtgroup.c    | 16 ++++++++++------
 4 files changed, 39 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index c7561c613209..d79f494a4e91 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -231,9 +231,15 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
 	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
 	union cpuid_0x10_3_eax eax;
 	union cpuid_0x10_x_edx edx;
-	u32 ebx, ecx;
+	u32 ebx, ecx, subleaf;
+
+	/*
+	 * Query CPUID_Fn80000020_EDX_x01 for MBA and
+	 * CPUID_Fn80000020_EDX_x02 for SMBA
+	 */
+	subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1;
 
-	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
+	cpuid_count(0x80000020, subleaf, &eax.full, &ebx, &ecx, &edx.full);
 	hw_res->num_closid = edx.split.cos_max + 1;
 	r->default_ctrl = MAX_MBA_BW_AMD;
 
@@ -756,6 +762,19 @@ static __init bool get_mem_config(void)
 	return false;
 }
 
+static __init bool get_slow_mem_config(void)
+{
+	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_SMBA];
+
+	if (!rdt_cpu_has(X86_FEATURE_SMBA))
+		return false;
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
+
+	return false;
+}
+
 static __init bool get_rdt_alloc_resources(void)
 {
 	struct rdt_resource *r;
@@ -786,6 +805,9 @@ static __init bool get_rdt_alloc_resources(void)
 	if (get_mem_config())
 		ret = true;
 
+	if (get_slow_mem_config())
+		ret = true;
+
 	return ret;
 }
 
@@ -875,6 +897,9 @@ static __init void rdt_init_res_defs_amd(void)
 		} else if (r->rid == RDT_RESOURCE_MBA) {
 			hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
 			hw_res->msr_update = mba_wrmsr_amd;
+		} else if (r->rid == RDT_RESOURCE_SMBA) {
+			hw_res->msr_base = MSR_IA32_SMBA_BW_BASE;
+			hw_res->msr_update = mba_wrmsr_amd;
 		}
 	}
 }
diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
index 1dafbdc5ac31..42e2ef6fbdb8 100644
--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
@@ -210,7 +210,7 @@ static int parse_line(char *line, struct resctrl_schema *s,
 	unsigned long dom_id;
 
 	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
-	    r->rid == RDT_RESOURCE_MBA) {
+	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) {
 		rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
 		return -EINVAL;
 	}
diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
index 43d9f6f5a931..16e3c6e03c79 100644
--- a/arch/x86/kernel/cpu/resctrl/internal.h
+++ b/arch/x86/kernel/cpu/resctrl/internal.h
@@ -14,6 +14,7 @@
 #define MSR_IA32_L2_CBM_BASE		0xd10
 #define MSR_IA32_MBA_THRTL_BASE		0xd50
 #define MSR_IA32_MBA_BW_BASE		0xc0000200
+#define MSR_IA32_SMBA_BW_BASE		0xc0000280
 
 #define MSR_IA32_QM_CTR			0x0c8e
 #define MSR_IA32_QM_EVTSEL		0x0c8d
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index e5a48f05e787..1271fd1ae2f3 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -1213,7 +1213,7 @@ static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
 
 	list_for_each_entry(s, &resctrl_schema_all, list) {
 		r = s->res;
-		if (r->rid == RDT_RESOURCE_MBA)
+		if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)
 			continue;
 		has_cache = true;
 		list_for_each_entry(d, &r->domains, list) {
@@ -1402,7 +1402,8 @@ static int rdtgroup_size_show(struct kernfs_open_file *of,
 					ctrl = resctrl_arch_get_config(r, d,
 								       closid,
 								       type);
-				if (r->rid == RDT_RESOURCE_MBA)
+				if (r->rid == RDT_RESOURCE_MBA ||
+				    r->rid == RDT_RESOURCE_SMBA)
 					size = ctrl;
 				else
 					size = rdtgroup_cbm_to_size(r, d, ctrl);
@@ -2845,7 +2846,8 @@ static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
 
 	list_for_each_entry(s, &resctrl_schema_all, list) {
 		r = s->res;
-		if (r->rid == RDT_RESOURCE_MBA) {
+		if (r->rid == RDT_RESOURCE_MBA ||
+		    r->rid == RDT_RESOURCE_SMBA) {
 			rdtgroup_init_mba(r, rdtgrp->closid);
 			if (is_mba_sc(r))
 				continue;
@@ -3287,7 +3289,8 @@ void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d)
 {
 	lockdep_assert_held(&rdtgroup_mutex);
 
-	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
+	if (supports_mba_mbps() &&
+	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA))
 		mba_sc_domain_destroy(r, d);
 
 	if (!r->mon_capable)
@@ -3354,8 +3357,9 @@ int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d)
 
 	lockdep_assert_held(&rdtgroup_mutex);
 
-	if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA)
-		/* RDT_RESOURCE_MBA is never mon_capable */
+	if (supports_mba_mbps() &&
+	    (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA))
+		/* RDT_RESOURCE_MBA (or SMBA) is never mon_capable */
 		return mba_sc_domain_allocate(r, d);
 
 	if (!r->mon_capable)
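
Appended note (not part of the patch): a minimal user-space sketch of
the CPUID probe described in the commit message, buildable with GCC or
clang via <cpuid.h>. The SMBA feature-bit position used below
(CPUID_Fn80000020_EBX_x00, bit 2) is my assumption based on the
kernel's scattered cpufeature table; the patch itself relies only on
rdt_cpu_has(X86_FEATURE_SMBA) and the EDX_x02 sub-leaf.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Subleaf 0 of leaf 0x80000020 enumerates the bandwidth enforcement features. */
	if (!__get_cpuid_count(0x80000020, 0, &eax, &ebx, &ecx, &edx))
		return 1;

	/* EBX bit 2 is assumed here to be the SMBA feature bit (X86_FEATURE_SMBA). */
	if (!(ebx & (1u << 2))) {
		puts("SMBA not enumerated");
		return 0;
	}

	/* Subleaf 2 (CPUID_Fn80000020_EDX_x02): cos_max in EDX[15:0]. */
	__get_cpuid_count(0x80000020, 2, &eax, &ebx, &ecx, &edx);
	printf("SMBA CLOSIDs: %u\n", (edx & 0xffff) + 1);

	return 0;
}

The printed CLOSID count mirrors what __rdt_get_mem_config_amd()
computes in-kernel once the sub-leaf is selected: edx.split.cos_max + 1.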