Add a kernel parameter to set the unaligned access speed. This allows
skipping of the speed tests for unaligned accesses, which are often
very slow.

Signed-off-by: Jesse Taube <jesse@xxxxxxxxxxxx>
---
 arch/riscv/kernel/unaligned_access_speed.c | 81 ++++++++++++++++++++++
 1 file changed, 81 insertions(+)

diff --git a/arch/riscv/kernel/unaligned_access_speed.c b/arch/riscv/kernel/unaligned_access_speed.c
index 1548eb10ae4f..02f7a92a5fa0 100644
--- a/arch/riscv/kernel/unaligned_access_speed.c
+++ b/arch/riscv/kernel/unaligned_access_speed.c
@@ -400,13 +400,94 @@ static int vec_check_unaligned_access_speed_all_cpus(void *unused __always_unuse
 }
 #endif
 
+static DEFINE_PER_CPU(long, unaligned_scalar_speed_param) = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+
+static int __init set_unaligned_scalar_speed_param(char *str)
+{
+	cpumask_var_t mask;
+	int ret, cpu;
+	long speed = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+
+	if (!strncmp(str, "fast,", 5)) {
+		str += 5;
+		speed = RISCV_HWPROBE_MISALIGNED_FAST;
+	}
+
+	if (!strncmp(str, "slow,", 5)) {
+		str += 5;
+		speed = RISCV_HWPROBE_MISALIGNED_SLOW;
+	}
+	if (speed == RISCV_HWPROBE_MISALIGNED_UNKNOWN) {
+		pr_warn("Invalid unaligned access speed parameter\n");
+		return 1;
+	}
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	ret = cpulist_parse(str, mask);
+
+	for_each_cpu(cpu, mask)
+		if (per_cpu(unaligned_scalar_speed_param, cpu) == RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+			per_cpu(unaligned_scalar_speed_param, cpu) = speed;
+
+	free_cpumask_var(mask);
+	return ret == 0;
+}
+__setup("unaligned_scalar_speed=", set_unaligned_scalar_speed_param);
+
+static DEFINE_PER_CPU(long, unaligned_vector_speed_param) = RISCV_HWPROBE_VECTOR_MISALIGNED_UNKNOWN;
+
+static int __init set_unaligned_vector_speed_param(char *str)
+{
+	cpumask_var_t mask;
+	int ret, cpu;
+	long speed = RISCV_HWPROBE_VECTOR_MISALIGNED_UNKNOWN;
+
+	if (!strncmp(str, "fast,", 5)) {
+		str += 5;
+		speed = RISCV_HWPROBE_VECTOR_MISALIGNED_FAST;
+	}
+
+	if (!strncmp(str, "slow,", 5)) {
+		str += 5;
+		speed = RISCV_HWPROBE_VECTOR_MISALIGNED_SLOW;
+	}
+	if (speed == RISCV_HWPROBE_VECTOR_MISALIGNED_UNKNOWN) {
+		pr_warn("Invalid unaligned access speed parameter\n");
+		return 1;
+	}
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	ret = cpulist_parse(str, mask);
+
+	for_each_cpu(cpu, mask)
+		if (per_cpu(unaligned_vector_speed_param, cpu) == RISCV_HWPROBE_VECTOR_MISALIGNED_UNKNOWN)
+			per_cpu(unaligned_vector_speed_param, cpu) = speed;
+
+	free_cpumask_var(mask);
+	return ret == 0;
+}
+__setup("unaligned_vector_speed=", set_unaligned_vector_speed_param);
+
 static int check_unaligned_access_all_cpus(void)
 {
+	int cpu;
 	bool all_cpus_emulated, all_cpus_vec_unsupported;
 
 	all_cpus_emulated = check_unaligned_access_emulated_all_cpus();
 	all_cpus_vec_unsupported = check_vector_unaligned_access_emulated_all_cpus();
 
+	for_each_online_cpu(cpu) {
+		if (per_cpu(misaligned_access_speed, cpu) == RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+			per_cpu(misaligned_access_speed, cpu) = per_cpu(unaligned_scalar_speed_param, cpu);
+
+		if (per_cpu(vector_misaligned_access, cpu) == RISCV_HWPROBE_VECTOR_MISALIGNED_UNKNOWN)
+			per_cpu(vector_misaligned_access, cpu) = per_cpu(unaligned_vector_speed_param, cpu);
+	}
+
 	pr_info("\e[31m%s vector unaligned access\e[0m\n", all_cpus_vec_unsupported ? "All CPUs do not support" : "At least one cpu supports");
 
 	if (!all_cpus_vec_unsupported &&
-- 
2.45.2