From: Dylan Jhong <dylan@xxxxxxxxxxxxx>

commit 9a801afd3eb95e1a89aba17321062df06fb49d98 upstream.

Currently, we pass the CONTEXTID instead of the ASID to the TLB flush
function. We should only take the ASID field, to avoid touching the
reserved bit field.

Fixes: 3f1e782998cd ("riscv: add ASID-based tlbflushing methods")
Signed-off-by: Dylan Jhong <dylan@xxxxxxxxxxxxx>
Reviewed-by: Sergey Matyukevich <sergey.matyukevich@xxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20230313034906.2401730-1-dylan@xxxxxxxxxxxxx
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Palmer Dabbelt <palmer@xxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/riscv/include/asm/tlbflush.h | 2 ++
 arch/riscv/mm/context.c           | 2 +-
 arch/riscv/mm/tlbflush.c          | 2 +-
 3 files changed, 4 insertions(+), 2 deletions(-)

--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -12,6 +12,8 @@
 #include <asm/errata_list.h>
 
 #ifdef CONFIG_MMU
+extern unsigned long asid_mask;
+
 static inline void local_flush_tlb_all(void)
 {
 	__asm__ __volatile__ ("sfence.vma" : : : "memory");
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -22,7 +22,7 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocat
 
 static unsigned long asid_bits;
 static unsigned long num_asids;
-static unsigned long asid_mask;
+unsigned long asid_mask;
 
 static atomic_long_t current_version;
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -43,7 +43,7 @@ static void __sbi_tlb_flush_range(struct
 	/* check if the tlbflush needs to be sent to other CPUs */
 	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
 	if (static_branch_unlikely(&use_asid_allocator)) {
-		unsigned long asid = atomic_long_read(&mm->context.id);
+		unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
 
 		if (broadcast) {
 			riscv_cpuid_to_hartid_mask(cmask, &hmask);
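
For readers unfamiliar with the RISC-V ASID allocator, the standalone
sketch below (ordinary user-space C, not part of the patch; the 16-bit
ASID width and the make_context_id() helper are illustrative assumptions,
not the kernel's exact values) shows why the mask matters: the per-mm
context.id packs an allocator "version" in the high bits and the hardware
ASID in the low asid_bits, so passing the raw value to sfence.vma would
leak the version bits into the reserved ASID field.

/* Illustrative sketch only: how an ASID is recovered from a packed
 * context id by masking off the version bits.
 */
#include <stdio.h>

#define ASID_BITS 16UL                     /* assumed width for the demo */
#define ASID_MASK ((1UL << ASID_BITS) - 1) /* plays the role of asid_mask */

/* Pack an allocator version and a hardware ASID into one context id. */
static unsigned long make_context_id(unsigned long version, unsigned long asid)
{
	return (version << ASID_BITS) | (asid & ASID_MASK);
}

int main(void)
{
	unsigned long ctx = make_context_id(3, 0x42);

	printf("context.id        = %#lx\n", ctx);              /* 0x30042 */
	printf("raw value (wrong) = %#lx\n", ctx);               /* includes version bits */
	printf("asid (masked)     = %#lx\n", ctx & ASID_MASK);   /* 0x42, what the TLB flush needs */
	return 0;
}

This mirrors the one-line change in __sbi_tlb_flush_range(): the value read
from mm->context.id is masked with asid_mask before being used as an ASID.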