On Mon, Mar 10, 2025 at 04:12:11PM +0100, Clément Léger wrote:
> Now that the kernel can handle misaligned accesses in S-mode, request
> misaligned access exception delegation from SBI. This uses the FWFT SBI
> extension defined in SBI version 3.0.
> 
> Signed-off-by: Clément Léger <cleger@xxxxxxxxxxxx>
> ---
>  arch/riscv/include/asm/cpufeature.h        |  3 +-
>  arch/riscv/kernel/traps_misaligned.c       | 77 +++++++++++++++++++++-
>  arch/riscv/kernel/unaligned_access_speed.c | 11 +++-
>  3 files changed, 86 insertions(+), 5 deletions(-)
> 
> diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
> index 569140d6e639..ad7d26788e6a 100644
> --- a/arch/riscv/include/asm/cpufeature.h
> +++ b/arch/riscv/include/asm/cpufeature.h
> @@ -64,8 +64,9 @@ void __init riscv_user_isa_enable(void);
>  	_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts), _validate)
>  
>  bool check_unaligned_access_emulated_all_cpus(void);
> +void unaligned_access_init(void);
> +int cpu_online_unaligned_access_init(unsigned int cpu);
>  #if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
> -void check_unaligned_access_emulated(struct work_struct *work __always_unused);
>  void unaligned_emulation_finish(void);
>  bool unaligned_ctl_available(void);
>  DECLARE_PER_CPU(long, misaligned_access_speed);
> diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
> index 7cc108aed74e..90ac74191357 100644
> --- a/arch/riscv/kernel/traps_misaligned.c
> +++ b/arch/riscv/kernel/traps_misaligned.c
> @@ -16,6 +16,7 @@
>  #include <asm/entry-common.h>
>  #include <asm/hwprobe.h>
>  #include <asm/cpufeature.h>
> +#include <asm/sbi.h>
>  #include <asm/vector.h>
>  
>  #define INSN_MATCH_LB			0x3
> @@ -635,7 +636,7 @@ bool check_vector_unaligned_access_emulated_all_cpus(void)
>  
>  static bool unaligned_ctl __read_mostly;
>  
> -void check_unaligned_access_emulated(struct work_struct *work __always_unused)
> +static void check_unaligned_access_emulated(struct work_struct *work __always_unused)
>  {
>  	int cpu = smp_processor_id();
>  	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
> @@ -646,6 +647,13 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
>  	__asm__ __volatile__ (
>  		"	"REG_L" %[tmp], 1(%[ptr])\n"
>  		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
> +}
> +
> +static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
> +{
> +	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
> +
> +	check_unaligned_access_emulated(NULL);
>  
>  	/*
>  	 * If unaligned_ctl is already set, this means that we detected that all
> @@ -654,9 +662,10 @@ void check_unaligned_access_emulated(struct work_struct *work __always_unused)
>  	 */
>  	if (unlikely(unaligned_ctl && (*mas_ptr != RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED))) {
>  		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
> -		while (true)
> -			cpu_relax();
> +		return -EINVAL;
>  	}
> +
> +	return 0;
>  }
>  
>  bool check_unaligned_access_emulated_all_cpus(void)
> @@ -688,4 +697,66 @@ bool check_unaligned_access_emulated_all_cpus(void)
>  {
>  	return false;
>  }
> +static int cpu_online_check_unaligned_access_emulated(unsigned int cpu)
> +{
> +	return 0;
> +}
>  #endif
> +
> +#ifdef CONFIG_RISCV_SBI
> +
> +static bool misaligned_traps_delegated;
> +
> +static int cpu_online_sbi_unaligned_setup(unsigned int cpu)
> +{
> +	if (sbi_fwft_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0) &&
> +	    misaligned_traps_delegated) {
> +		pr_crit("Misaligned trap delegation non homogeneous (expected delegated)");
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static void unaligned_sbi_request_delegation(void)
> +{
> +	int ret;
> +
> +	ret = sbi_fwft_all_cpus_set(SBI_FWFT_MISALIGNED_EXC_DELEG, 1, 0, 0);
> +	if (ret)
> +		return;
> +
> +	misaligned_traps_delegated = true;
> +	pr_info("SBI misaligned access exception delegation ok\n");
> +	/*
> +	 * Note that we don't have to take any specific action here, if
> +	 * the delegation is successful, then
> +	 * check_unaligned_access_emulated() will verify that indeed the
> +	 * platform traps on misaligned accesses.
> +	 */
> +}
> +
> +void unaligned_access_init(void)
> +{
> +	if (sbi_probe_extension(SBI_EXT_FWFT) > 0)
> +		unaligned_sbi_request_delegation();
> +}
> +#else
> +void unaligned_access_init(void) {}
> +
> +static int cpu_online_sbi_unaligned_setup(unsigned int cpu __always_unused)
> +{
> +	return 0;
> +}
> +#endif
> +
> +int cpu_online_unaligned_access_init(unsigned int cpu)
> +{
> +	int ret;
> +
> +	ret = cpu_online_sbi_unaligned_setup(cpu);
> +	if (ret)
> +		return ret;
> +
> +	return cpu_online_check_unaligned_access_emulated(cpu);
> +}
> diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
> index 91f189cf1611..2f3aba073297 100644
> --- a/arch/riscv/kernel/unaligned_access_speed.c
> +++ b/arch/riscv/kernel/unaligned_access_speed.c
> @@ -188,13 +188,20 @@ arch_initcall_sync(lock_and_set_unaligned_access_static_branch);
>  
>  static int riscv_online_cpu(unsigned int cpu)
>  {
> +	int ret;
>  	static struct page *buf;
>  
>  	/* We are already set since the last check */
>  	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN)
>  		goto exit;
>  
> -	check_unaligned_access_emulated(NULL);
> +	ret = cpu_online_unaligned_access_init(cpu);
> +	if (ret)
> +		return ret;
> +
> +	if (per_cpu(misaligned_access_speed, cpu) == RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED)
> +		goto exit;
> +
>  	buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
>  	if (!buf) {
>  		pr_warn("Allocation failure, not measuring misaligned performance\n");
> @@ -403,6 +410,8 @@ static int check_unaligned_access_all_cpus(void)
>  {
>  	bool all_cpus_emulated, all_cpus_vec_unsupported;
>  
> +	unaligned_access_init();
> +
>  	all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
>  	all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
>  
> -- 
> 2.47.2
> 

Reviewed-by: Andrew Jones <ajones@xxxxxxxxxxxxxxxx>