Reviewed-by: Julius Werner <jwerner@xxxxxxxxxxxx>

On Thu, Dec 19, 2024 at 9:54 PM Douglas Anderson <dianders@xxxxxxxxxxxx> wrote:
>
> The code for detecting CPUs that are vulnerable to Spectre BHB was
> based on a hardcoded list of CPU IDs that were known to be affected.
> Unfortunately, the list mostly only contained the IDs of standard ARM
> cores. The IDs for many cores that are minor variants of the standard
> ARM cores (like many Qualcomm Kryo CPUs) weren't listed. This led the
> code to assume that those variants were not affected.
>
> Flip the code on its head and instead assume that a core is vulnerable
> if it doesn't have CSV2_3 and isn't recognized as safe. This involves
> creating a "Spectre BHB safe" list.
>
> As of right now, the only CPU IDs added to the "Spectre BHB safe" list
> are ARM Cortex A35, A53, A55, A510, and A520. This list was created by
> looking for cores that weren't listed in ARM's list [1] as per review
> feedback on v2 of this patch [2].
>
> NOTE: this patch will not actually _mitigate_ any cores; it will
> simply cause them to report themselves as vulnerable. If any cores in
> the system are reported as vulnerable but not mitigated, then the
> whole system will be reported as vulnerable, though the system will
> still attempt to mitigate with the information it has about the known
> cores.
>
> [1] https://developer.arm.com/Arm%20Security%20Center/Spectre-BHB
> [2] https://lore.kernel.org/r/20241219175128.GA25477@willie-the-truck
>
> Fixes: 558c303c9734 ("arm64: Mitigate spectre style branch history side channels")
> Cc: stable@xxxxxxxxxxxxxxx
> Signed-off-by: Douglas Anderson <dianders@xxxxxxxxxxxx>
> ---
>
> Changes in v3:
> - Don't guess the mitigation; just report unknown cores as vulnerable.
> - Restructure the code since is_spectre_bhb_affected() defaults to true
>
> Changes in v2:
> - New
>
>  arch/arm64/include/asm/spectre.h |   1 -
>  arch/arm64/kernel/proton-pack.c  | 144 +++++++++++++++++--------------
>  2 files changed, 77 insertions(+), 68 deletions(-)
>
> diff --git a/arch/arm64/include/asm/spectre.h b/arch/arm64/include/asm/spectre.h
> index 0c4d9045c31f..f1524cdeacf1 100644
> --- a/arch/arm64/include/asm/spectre.h
> +++ b/arch/arm64/include/asm/spectre.h
> @@ -97,7 +97,6 @@ enum mitigation_state arm64_get_meltdown_state(void);
>
>  enum mitigation_state arm64_get_spectre_bhb_state(void);
>  bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
> -u8 spectre_bhb_loop_affected(int scope);
>  void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
>  bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
>
> diff --git a/arch/arm64/kernel/proton-pack.c b/arch/arm64/kernel/proton-pack.c
> index da53722f95d4..06e04c9e6480 100644
> --- a/arch/arm64/kernel/proton-pack.c
> +++ b/arch/arm64/kernel/proton-pack.c
> @@ -845,52 +845,68 @@ static unsigned long system_bhb_mitigations;
>   * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
>   * SCOPE_SYSTEM call will give the right answer.
>   */
> -u8 spectre_bhb_loop_affected(int scope)
> +static bool is_spectre_bhb_safe(int scope)
> +{
> +	static const struct midr_range spectre_bhb_safe_list[] = {
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
> +		{},
> +	};
> +	static bool all_safe = true;
> +
> +	if (scope != SCOPE_LOCAL_CPU)
> +		return all_safe;
> +
> +	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
> +		return true;
> +
> +	all_safe = false;
> +
> +	return false;
> +}
> +
> +static u8 spectre_bhb_loop_affected(void)
>  {
>  	u8 k = 0;
> -	static u8 max_bhb_k;
> -
> -	if (scope == SCOPE_LOCAL_CPU) {
> -		static const struct midr_range spectre_bhb_k32_list[] = {
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
> -			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
> -			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
> -			{},
> -		};
> -		static const struct midr_range spectre_bhb_k24_list[] = {
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
> -			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
> -			{},
> -		};
> -		static const struct midr_range spectre_bhb_k11_list[] = {
> -			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
> -			{},
> -		};
> -		static const struct midr_range spectre_bhb_k8_list[] = {
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
> -			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
> -			{},
> -		};
> -
> -		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
> -			k = 32;
> -		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
> -			k = 24;
> -		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
> -			k = 11;
> -		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
> -			k = 8;
> -
> -		max_bhb_k = max(max_bhb_k, k);
> -	} else {
> -		k = max_bhb_k;
> -	}
> +
> +	static const struct midr_range spectre_bhb_k32_list[] = {
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
> +		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
> +		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
> +		{},
> +	};
> +	static const struct midr_range spectre_bhb_k24_list[] = {
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
> +		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
> +		{},
> +	};
> +	static const struct midr_range spectre_bhb_k11_list[] = {
> +		MIDR_ALL_VERSIONS(MIDR_AMPERE1),
> +		{},
> +	};
> +	static const struct midr_range spectre_bhb_k8_list[] = {
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
> +		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
> +		{},
> +	};
> +
> +	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
> +		k = 32;
> +	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
> +		k = 24;
> +	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
> +		k = 11;
> +	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
> +		k = 8;
>
>  	return k;
>  }
> @@ -916,9 +932,8 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
>  	}
>  }
>
> -static bool is_spectre_bhb_fw_affected(int scope)
> +static bool is_spectre_bhb_fw_affected(void)
>  {
> -	static bool system_affected;
>  	enum mitigation_state fw_state;
>  	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
>  	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
> @@ -929,16 +944,8 @@ static bool is_spectre_bhb_fw_affected(int scope)
>  	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
>  					spectre_bhb_firmware_mitigated_list);
>
> -	if (scope != SCOPE_LOCAL_CPU)
> -		return system_affected;
> -
>  	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
> -	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
> -		system_affected = true;
> -		return true;
> -	}
> -
> -	return false;
> +	return cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED);
>  }
>
>  static bool supports_ecbhb(int scope)
> @@ -954,6 +961,8 @@ static bool supports_ecbhb(int scope)
>  					    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
>  }
>
> +static u8 max_bhb_k;
> +
>  bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
>  			     int scope)
>  {
> @@ -962,16 +971,18 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
>  	if (supports_csv2p3(scope))
>  		return false;
>
> -	if (supports_clearbhb(scope))
> -		return true;
> -
> -	if (spectre_bhb_loop_affected(scope))
> -		return true;
> +	if (is_spectre_bhb_safe(scope))
> +		return false;
>
> -	if (is_spectre_bhb_fw_affected(scope))
> -		return true;
> +	/*
> +	 * At this point the core isn't known to be "safe", so we're going
> +	 * to assume it's vulnerable. We still need to update `max_bhb_k`,
> +	 * but only if we aren't mitigating with clearbhb.
> +	 */
> +	if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
> +		max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());
>
> -	return false;
> +	return true;
>  }
>
>  static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
> @@ -1028,7 +1039,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
>  		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
>  		state = SPECTRE_MITIGATED;
>  		set_bit(BHB_INSN, &system_bhb_mitigations);
> -	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
> +	} else if (spectre_bhb_loop_affected()) {
>  		/*
>  		 * Ensure KVM uses the indirect vector which will have the
>  		 * branchy-loop added. A57/A72-r0 will already have selected
> @@ -1041,7 +1052,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
>  		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
>  		state = SPECTRE_MITIGATED;
>  		set_bit(BHB_LOOP, &system_bhb_mitigations);
> -	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
> +	} else if (is_spectre_bhb_fw_affected()) {
>  		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
>  		if (fw_state == SPECTRE_MITIGATED) {
>  			/*
> @@ -1100,7 +1111,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
>  {
>  	u8 rd;
>  	u32 insn;
> -	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
>
>  	BUG_ON(nr_inst != 1); /* MOV -> MOV */
>
> @@ -1109,7 +1119,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
>
>  	insn = le32_to_cpu(*origptr);
>  	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
> -	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
> +	insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
>  					 AARCH64_INSN_VARIANT_64BIT,
>  					 AARCH64_INSN_MOVEWIDE_ZERO);
>  	*updptr++ = cpu_to_le32(insn);
> --
> 2.47.1.613.gc27f4b7a9f-goog
>
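For anyone following along, the heart of the change is the allow-list
inversion in is_spectre_bhb_affected(): unknown cores now fail safe
(report vulnerable) instead of silently being treated as unaffected.
Below is a minimal standalone sketch of that pattern. The MIDR values
and list entries are hypothetical stand-ins I made up for illustration,
not the kernel's actual tables; the real ones live in
spectre_bhb_safe_list in arch/arm64/kernel/proton-pack.c.

/*
 * Userspace sketch of the deny-by-default BHB check. Hypothetical
 * MIDR ranges only; not the kernel's real tables.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct midr_range {
	uint32_t min;
	uint32_t max;
};

/* Hypothetical "known safe" cores (the allow-list). */
static const struct midr_range safe_list[] = {
	{ 0x410fd030, 0x410fd03f },	/* stand-in for an A53-like part */
	{ 0x410fd050, 0x410fd05f },	/* stand-in for an A55-like part */
};

static bool midr_in_list(uint32_t midr, const struct midr_range *list,
			 size_t n)
{
	for (size_t i = 0; i < n; i++)
		if (midr >= list[i].min && midr <= list[i].max)
			return true;
	return false;
}

/*
 * The inversion: a core is assumed vulnerable unless it advertises
 * CSV2_3 or is explicitly on the safe list. An unlisted vendor
 * variant of a standard ARM core now reports itself vulnerable.
 */
static bool is_bhb_affected(uint32_t midr, bool has_csv2_3)
{
	if (has_csv2_3)
		return false;
	if (midr_in_list(midr, safe_list,
			 sizeof(safe_list) / sizeof(safe_list[0])))
		return false;
	return true;
}

int main(void)
{
	/* Unlisted (hypothetical vendor) core: reported vulnerable. */
	printf("unknown core affected: %d\n",
	       is_bhb_affected(0x510fd420, false));
	/* Listed core: reported safe. */
	printf("listed core affected:  %d\n",
	       is_bhb_affected(0x410fd034, false));
	return 0;
}

The same inversion is why max_bhb_k moves out of
spectre_bhb_loop_affected() in the patch: with the default flipped, the
per-CPU accounting happens in the one place that still sees
SCOPE_LOCAL_CPU, is_spectre_bhb_affected().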