Some envcfg bits need to be controlled on a per-thread basis, such as
the pointer masking mode. However, the envcfg CSR value cannot simply
be stored in struct thread_struct, because some hardware may implement
a different subset of the envcfg CSR bits across CPUs. As a result, we
need to combine the per-CPU and per-thread bits whenever we switch
threads.

Signed-off-by: Samuel Holland <samuel.holland@xxxxxxxxxx>
---
 arch/riscv/include/asm/cpufeature.h |  2 ++
 arch/riscv/include/asm/processor.h  |  1 +
 arch/riscv/include/asm/switch_to.h  | 12 ++++++++++++
 arch/riscv/kernel/cpufeature.c      |  4 +++-
 4 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 0bd11862b760..b1ad8d0b4599 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -33,6 +33,8 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 
+DECLARE_PER_CPU(unsigned long, riscv_cpu_envcfg);
+
 void riscv_user_isa_enable(void);
 
 #ifdef CONFIG_RISCV_MISALIGNED
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index a8509cc31ab2..06b87402a4d8 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -118,6 +118,7 @@ struct thread_struct {
 	unsigned long s[12];	/* s[0]: frame pointer */
 	struct __riscv_d_ext_state fstate;
 	unsigned long bad_cause;
+	unsigned long envcfg;
 	u32 riscv_v_flags;
 	u32 vstate_ctrl;
 	struct __riscv_v_ext_state vstate;
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index 7efdb0584d47..256a354a5c4a 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -69,6 +69,17 @@ static __always_inline bool has_fpu(void) { return false; }
 #define __switch_to_fpu(__prev, __next) do { } while (0)
 #endif
 
+static inline void sync_envcfg(struct task_struct *task)
+{
+	csr_write(CSR_ENVCFG, this_cpu_read(riscv_cpu_envcfg) | task->thread.envcfg);
+}
+
+static inline void __switch_to_envcfg(struct task_struct *next)
+{
+	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
+		sync_envcfg(next);
+}
+
 extern struct task_struct *__switch_to(struct task_struct *,
 				       struct task_struct *);
 
@@ -80,6 +91,7 @@ do {							\
 		__switch_to_fpu(__prev, __next);	\
 	if (has_vector())					\
 		__switch_to_vector(__prev, __next);	\
+	__switch_to_envcfg(__next);			\
 	((last) = __switch_to(__prev, __next));	\
 } while (0)
 
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index d1846aab1f78..32aaaf41f8a8 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -44,6 +44,8 @@ static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
 /* Per-cpu ISA extensions. */
 struct riscv_isainfo hart_isa[NR_CPUS];
 
+DEFINE_PER_CPU(unsigned long, riscv_cpu_envcfg);
+
 /* Performance information */
 DEFINE_PER_CPU(long, misaligned_access_speed);
 
@@ -978,7 +980,7 @@ arch_initcall(check_unaligned_access_all_cpus);
 void riscv_user_isa_enable(void)
 {
 	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
-		csr_set(CSR_ENVCFG, ENVCFG_CBZE);
+		this_cpu_or(riscv_cpu_envcfg, ENVCFG_CBZE);
 }
 
 #ifdef CONFIG_RISCV_ALTERNATIVE
-- 
2.43.1
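
[Illustration, not part of the patch] A minimal userspace sketch of the
combine-on-switch idea described above: a per-CPU base value is OR-ed
with the next task's per-thread value to form the image written to the
CSR at context switch. All struct names, macros, and bit positions below
are hypothetical stand-ins, not kernel identifiers.

/*
 * Illustrative sketch only: models how a per-CPU envcfg base is combined
 * with a per-thread value before being written to the CSR on a context
 * switch. Names and bit positions are assumptions for illustration.
 */
#include <stdio.h>

#define EXAMPLE_ENVCFG_CBZE	(1UL << 7)	/* example: cache-block zero enable */
#define EXAMPLE_ENVCFG_PM	(1UL << 1)	/* hypothetical per-thread policy bit */

struct example_cpu    { unsigned long envcfg; };	/* stand-in for riscv_cpu_envcfg */
struct example_thread { unsigned long envcfg; };	/* stand-in for thread_struct.envcfg */

/* Mirrors the OR in sync_envcfg(): per-CPU bits | per-thread bits. */
static unsigned long merged_envcfg(const struct example_cpu *cpu,
				   const struct example_thread *next)
{
	return cpu->envcfg | next->envcfg;
}

int main(void)
{
	struct example_cpu cpu = { .envcfg = EXAMPLE_ENVCFG_CBZE };	/* set once per CPU */
	struct example_thread a = { .envcfg = 0 };
	struct example_thread b = { .envcfg = EXAMPLE_ENVCFG_PM };	/* per-thread setting */

	printf("switch to a: envcfg = %#lx\n", merged_envcfg(&cpu, &a));
	printf("switch to b: envcfg = %#lx\n", merged_envcfg(&cpu, &b));
	return 0;
}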