Enable ABMC by default when the feature is supported. Counters are assigned automatically on group creation. If the counters are exhausted, report a warning and continue. It is not required to fail group creation for assignment failures. Users will have the option to modify the assignments.
rdt_hw_resource *hw_res = resctrl_to_arch_res(r); + int ret = 0; + + if (!hw_res->abmc_enabled) + return 0; + + if (is_mbm_total_enabled()) + ret = resctrl_grp_unassign(rdtgrp, QOS_L3_MBM_TOTAL_EVENT_ID); + + if (!ret && is_mbm_local_enabled()) + ret = resctrl_grp_unassign(rdtgrp, QOS_L3_MBM_LOCAL_EVENT_ID); + return ret; +} + static int rdt_get_tree(struct fs_context *fc) { struct rdt_fs_context *ctx = rdt_fc2context(fc); @@ -2941,6 +2979,14 @@ static int rdt_get_tree(struct fs_context *fc) if (ret < 0) goto out_mongrp; rdtgroup_default.mon.mon_data_kn = kn_mondata; + + /* + * Assign the monitor counters if it is available. If it fails, + * report the warnings and continue. It is not nessaccery to + * fail here. + */ + if (resctrl_assign_events(&rdtgroup_default) < 0) + rdt_last_cmd_puts("Monitor assignment failed\n"); } ret = rdt_pseudo_lock_init(); @@ -3214,6 +3260,8 @@ static void rdt_kill_sb(struct super_block *sb) cpus_read_lock(); mutex_lock(&rdtgroup_mutex); + resctrl_unassign_events(&rdtgroup_default); + rdt_disable_ctx(); /*Put everything back to default values. */ @@ -3752,6 +3800,14 @@ static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, goto out_unlock; } + /* + * Assign the monitor counters if it is available. If it fails, + * report the warnings and continue. It is not nessaccery to + * fail here. + */ + if (resctrl_assign_events(rdtgrp) < 0) + rdt_last_cmd_puts("Monitor assignment failed\n"); + kernfs_activate(rdtgrp->kn); /* @@ -3796,6 +3852,14 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, if (ret) goto out_closid_free; + /* + * Assign the monitor counters if it is available. If it fails, + * report the warnings and continue. It is not nessaccery to + * fail here. 
+ */ + if (resctrl_assign_events(rdtgrp) < 0) + rdt_last_cmd_puts("Monitor assignment failed\n"); + kernfs_activate(rdtgrp->kn); ret = rdtgroup_init_alloc(rdtgrp); @@ -3891,6 +3955,9 @@ static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) update_closid_rmid(tmpmask, NULL); rdtgrp->flags = RDT_DELETED; + + resctrl_unassign_events(rdtgrp); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); /* @@ -3937,6 +4004,8 @@ static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); update_closid_rmid(tmpmask, NULL); + resctrl_unassign_events(rdtgrp); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); closid_free(rdtgrp->closid); -- 2.34.1