This is a note to let you know that I've just added the patch titled

    arm64: Add support for SB barrier and patch in over DSB; ISB sequences

to the 4.19-stable tree which can be found at:

    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
    arm64-add-support-for-sb-barrier-and-patch-in-over-d.patch
and it can be found in the queue-4.19 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.

commit 567c3d2a998036a09a05c96310527a5983ed2f38
Author: Will Deacon <will@xxxxxxxxxx>
Date:   Fri Aug 9 11:43:43 2024 +0100

    arm64: Add support for SB barrier and patch in over DSB; ISB sequences

    [ Upstream commit bd4fb6d270bc423a9a4098108784f7f9254c4e6d ]

    We currently use a DSB; ISB sequence to inhibit speculation in set_fs().
    Whilst this works for current CPUs, future CPUs may implement a new SB
    barrier instruction which acts as an architected speculation barrier.

    On CPUs that support it, patch in an SB; NOP sequence over the DSB; ISB
    sequence and advertise the presence of the new instruction to userspace.

    Signed-off-by: Will Deacon <will.deacon@xxxxxxx>
    [ Mark: fixup conflicts ]
    Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx>
    Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index fc3d26c954a40..efabe6c476aa0 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -133,6 +133,19 @@
 	hint	#22
 	.endm
 
+/*
+ * Speculation barrier
+ */
+	.macro	sb
+alternative_if_not ARM64_HAS_SB
+	dsb	nsh
+	isb
+alternative_else
+	SB_BARRIER_INSN
+	nop
+alternative_endif
+	.endm
+
 /*
  * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
  * of bounds.
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 822a9192c5511..f66bb04fdf2dd 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -34,6 +34,10 @@
 #define psb_csync()	asm volatile("hint #17" : : : "memory")
 #define csdb()		asm volatile("hint #20" : : : "memory")
 
+#define spec_bar()	asm volatile(ALTERNATIVE("dsb nsh\nisb\n",	\
+						 SB_BARRIER_INSN"nop\n",	\
+						 ARM64_HAS_SB))
+
 #define mb()		dsb(sy)
 #define rmb()		dsb(ld)
 #define wmb()		dsb(st)
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index 61fd28522d74f..a7e2378df3d1c 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -56,7 +56,8 @@
 #define ARM64_WORKAROUND_1542419	35
 #define ARM64_SPECTRE_BHB		36
 #define ARM64_WORKAROUND_1742098	37
+#define ARM64_HAS_SB			38
 
-#define ARM64_NCAPS			38
+#define ARM64_NCAPS			39
 
 #endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 0a8342de5796a..8f015c20f3e0e 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -97,6 +97,11 @@
 #define SET_PSTATE_SSBS(x) __emit_inst(0xd5000000 | REG_PSTATE_SSBS_IMM |	\
 				       (!!x)<<8 | 0x1f)
 
+#define __SYS_BARRIER_INSN(CRm, op2, Rt)			\
+	__emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
+
+#define SB_BARRIER_INSN		__SYS_BARRIER_INSN(0, 7, 31)
+
 #define SYS_DC_ISW		sys_insn(1, 0, 7, 6, 2)
 #define SYS_DC_IGSW		sys_insn(1, 0, 7, 6, 4)
 #define SYS_DC_IGDSW		sys_insn(1, 0, 7, 6, 6)
@@ -528,6 +533,7 @@
 #define ID_AA64ISAR0_AES_SHIFT		4
 
 /* id_aa64isar1 */
+#define ID_AA64ISAR1_SB_SHIFT		36
 #define ID_AA64ISAR1_LRCPC_SHIFT	20
 #define ID_AA64ISAR1_FCMA_SHIFT		16
 #define ID_AA64ISAR1_JSCVT_SHIFT	12
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index e66b0fca99c2f..3c3bf4171f3b6 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -46,8 +46,7 @@ static inline void set_fs(mm_segment_t fs)
 	 * Prevent a mispredicted conditional call to set_fs from forwarding
 	 * the wrong address limit to access_ok under speculation.
	 */
-	dsb(nsh);
-	isb();
+	spec_bar();
 
 	/* On user-mode return, check fs is correct */
 	set_thread_flag(TIF_FSCHECK);
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 2bcd6e4f34740..7784f7cba16cf 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -49,5 +49,6 @@
 #define HWCAP_ILRCPC		(1 << 26)
 #define HWCAP_FLAGM		(1 << 27)
 #define HWCAP_SSBS		(1 << 28)
+#define HWCAP_SB		(1 << 29)
 
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d7e73a7963d1b..3f6a2187d0911 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -144,6 +144,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
@@ -1361,6 +1362,16 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_ssbs,
 	},
 #endif
+	{
+		.desc = "Speculation barrier (SB)",
+		.capability = ARM64_HAS_SB,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR1_EL1,
+		.field_pos = ID_AA64ISAR1_SB_SHIFT,
+		.sign = FTR_UNSIGNED,
+		.min_field_value = 1,
+	},
 	{},
 };
 
@@ -1415,6 +1426,7 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
+	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SB),
 	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 36bd58d8ca11f..9d013e7106a99 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -82,6 +82,7 @@ static const char *const hwcap_str[] = {
 	"ilrcpc",
 	"flagm",
 	"ssbs",
+	"sb",
 	NULL
 };
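
As a usage note (not part of the patch): since the backport advertises the new
instruction to userspace through HWCAP_SB, a program can test for it at run
time. Below is a minimal sketch assuming the standard glibc getauxval()/AT_HWCAP
interface; HWCAP_SB is defined locally with the value (1 << 29) from the uapi
header above, in case the installed libc headers predate this patch.

/* Illustrative example only -- not part of the backported patch. */
#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_SB
#define HWCAP_SB	(1 << 29)	/* matches arch/arm64/include/uapi/asm/hwcap.h above */
#endif

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	if (hwcaps & HWCAP_SB)
		printf("SB speculation barrier instruction advertised\n");
	else
		printf("SB not advertised; kernel uses the DSB; ISB fallback\n");

	return 0;
}

The same capability is reported as the "sb" flag in /proc/cpuinfo via the
hwcap_str[] entry added in cpuinfo.c above.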