Incorporate a mechanism within the context-switching code to reset the
hardware history for AMD processors. Specifically, when a task is
switched in, the class ID is read and the hardware workload
classification history kept by the CPU firmware is reset, which then
triggers workload classification for the next running thread.

Signed-off-by: Perry Yuan <perry.yuan@xxxxxxx>
---
 arch/x86/include/asm/hreset.h |  7 +++++++
 arch/x86/kernel/cpu/common.c  | 18 ++++++++++++++++++
 arch/x86/kernel/process_32.c  |  3 +++
 arch/x86/kernel/process_64.c  |  3 +++
 4 files changed, 31 insertions(+)
 create mode 100644 arch/x86/include/asm/hreset.h

diff --git a/arch/x86/include/asm/hreset.h b/arch/x86/include/asm/hreset.h
new file mode 100644
index 000000000000..ae1f72602bbd
--- /dev/null
+++ b/arch/x86/include/asm/hreset.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_X86_HRESET_H
+#define _ASM_X86_HRESET_H
+
+void reset_hardware_history_hetero(void);
+
+#endif /* _ASM_X86_HRESET_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index d4e539d4e158..2ef34669fcb6 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -57,6 +57,7 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 #include <asm/cacheinfo.h>
+#include <asm/hreset.h>
 #include <asm/memtype.h>
 #include <asm/microcode.h>
 #include <asm/intel-family.h>
@@ -398,6 +399,14 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
 	cr4_clear_bits(X86_CR4_UMIP);
 }
 
+static u32 hardware_history_features __ro_after_init;
+
+static __always_inline void setup_hreset(struct cpuinfo_x86 *c)
+{
+	if (cpu_feature_enabled(X86_FEATURE_WORKLOAD_CLASS))
+		hardware_history_features = 1;
+}
+
 /* These bits should not change their value after CPU init is finished. */
 static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
 	X86_CR4_FSGSBASE | X86_CR4_CET | X86_CR4_FRED;
@@ -1839,6 +1848,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
 	setup_smep(c);
 	setup_smap(c);
 	setup_umip(c);
+	setup_hreset(c);
 
 	/* Enable FSGSBASE instructions if available. */
 	if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
@@ -2392,3 +2402,11 @@ void __init arch_cpu_finalize_init(void)
 	 */
 	mem_encrypt_init();
 }
+
+__always_inline void reset_hardware_history_hetero(void)
+{
+	if (!hardware_history_features)
+		return;
+
+	wrmsrl(AMD_WORKLOAD_HRST, 0x1);
+}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 0917c7f25720..6a3a1339f7a7 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -52,6 +52,7 @@
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
 #include <asm/resctrl.h>
+#include <asm/hreset.h>
 #include <asm/proto.h>
 
 #include "process.h"
@@ -213,6 +214,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* Load the Intel cache allocation PQR MSR. */
 	resctrl_sched_in(next_p);
 
+	reset_hardware_history_hetero();
+
 	return prev_p;
 }
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6d3d20e3e43a..096ac69bb8db 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -54,6 +54,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/vdso.h>
 #include <asm/resctrl.h>
+#include <asm/hreset.h>
 #include <asm/unistd.h>
 #include <asm/fsgsbase.h>
 #include <asm/fred.h>
@@ -709,6 +710,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* Load the Intel cache allocation PQR MSR. */
 	resctrl_sched_in(next_p);
 
+	reset_hardware_history_hetero();
+
 	return prev_p;
 }
-- 
2.34.1