This changes the semantics of BPF_NOSPEC to always insert a speculation
barrier. If this is not needed on some architecture, bypass_spec_v4()
should instead return true. Consequently, sanitize_stack_spill is
renamed to nospec_result. This later allows us to rely on the
BPF_NOSPEC barrier introduced for Spectre v4 to reduce the complexity
of Spectre v1 verification.

Signed-off-by: Luis Gerhorst <luis.gerhorst@xxxxxx>
Acked-by: Henriette Herzog <henriette.herzog@xxxxxx>
Cc: Maximilian Ott <ott@xxxxxxxxx>
Cc: Milan Stephan <milan.stephan@xxxxxx>
---
 arch/arm64/net/bpf_jit_comp.c | 10 +---------
 include/linux/bpf.h           | 14 +++++++++++++-
 include/linux/bpf_verifier.h  |  2 +-
 kernel/bpf/verifier.c         |  4 ++--
 4 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 8446848edddb..18370a45e8f2 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1508,15 +1508,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 
 	/* speculation barrier */
 	case BPF_ST | BPF_NOSPEC:
-		/*
-		 * Nothing required here.
-		 *
-		 * In case of arm64, we rely on the firmware mitigation of
-		 * Speculative Store Bypass as controlled via the ssbd kernel
-		 * parameter. Whenever the mitigation is enabled, it works
-		 * for all of the kernel code with no need to provide any
-		 * additional instructions.
-		 */
+		/* TODO: emit(A64_SB) */
 		break;
 
 	/* ST: *(size *)(dst + off) = imm */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f3f50e29d639..bd2a2c5f519e 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2423,7 +2423,19 @@ static inline bool bpf_bypass_spec_v1(const struct bpf_token *token)
 
 static inline bool bpf_bypass_spec_v4(const struct bpf_token *token)
 {
-	return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
+#ifdef CONFIG_ARM64
+	/* In case of arm64, we rely on the firmware mitigation of Speculative
+	 * Store Bypass as controlled via the ssbd kernel parameter. Whenever
+	 * the mitigation is enabled, it works for all of the kernel code with
+	 * no need to provide any additional instructions. Therefore, skip
+	 * inserting nospec insns against Spectre v4 if arm64
+	 * spectre_v4_mitigations_on/dynamic() is true.
+	 */
+	bool spec_v4 = arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE;
+#else
+	bool spec_v4 = true;
+#endif
+	return !spec_v4 || cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON);
 }
 
 int bpf_map_new_fd(struct bpf_map *map, int flags);
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 32c23f2a3086..2af09d75c7cd 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -561,7 +561,7 @@ struct bpf_insn_aux_data {
 	u64 map_key_state; /* constant (32 bit) key tracking for maps */
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
-	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
+	bool nospec_result; /* prevent following insns from executing speculatively */
 	bool zext_dst; /* this insn zero extends dst reg */
 	bool needs_zext; /* alu op needs to clear upper bits */
 	bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 60611df77957..5be3bd38f540 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -4904,7 +4904,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
 		}
 
 		if (sanitize)
-			env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
+			env->insn_aux_data[insn_idx].nospec_result = true;
 	}
 
 	err = destroy_if_dynptr_stack_slot(env, state, spi);
@@ -20445,7 +20445,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		}
 
 		if (type == BPF_WRITE &&
-		    env->insn_aux_data[i + delta].sanitize_stack_spill) {
+		    env->insn_aux_data[i + delta].nospec_result) {
 			struct bpf_insn patch[] = {
 				*insn,
 				BPF_ST_NOSPEC(),
-- 
2.48.1
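
Editor's note, for illustration only (not part of the patch): under the
new contract, the decision to skip the barrier lives entirely in
bpf_bypass_spec_v4(), and every JIT must lower BPF_NOSPEC to a real
speculation barrier. A minimal C sketch of the combined condition that
ends up guarding the BPF_ST_NOSPEC patching, assuming the existing
env->bypass_spec_v4 flag (which the verifier derives from
bpf_bypass_spec_v4()) and a hypothetical helper name:

	/*
	 * Sketch only: a barrier is patched in after insn i iff the
	 * architecture/token does not bypass Spectre v4 and the insn
	 * was marked nospec_result during verification.
	 */
	static bool wants_nospec_patch(struct bpf_verifier_env *env, int i)
	{
		return !env->bypass_spec_v4 &&
		       env->insn_aux_data[i].nospec_result;
	}

In the patch itself this split is implicit: nospec_result is only ever
set when the bypass is not in effect, so convert_ctx_accesses() can
test the flag alone.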