From: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>

The s32ilp32 ABI uses 9 bits for asid_bits because of the xlen=32 limit
of its CSRs. The xlen of s64ilp32 is 64 bits wide, so its SATP CSR uses
the same 64-bit format as Sv39, Sv48, and Sv57, which provides a 16-bit
ASID field. This patch makes the ASID mechanism support s64ilp32 with
the maximum num_asids.

Signed-off-by: Guo Ren <guoren@xxxxxxxxxxxxxxxxx>
Signed-off-by: Guo Ren <guoren@xxxxxxxxxx>
---
 arch/riscv/mm/context.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 80ce9caba8d2..696a7ca41f55 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -20,9 +20,9 @@
 
 DEFINE_STATIC_KEY_FALSE(use_asid_allocator);
 
-static unsigned long asid_bits;
+static xlen_t asid_bits;
 static unsigned long num_asids;
-static unsigned long asid_mask;
+static xlen_t asid_mask;
 
 static atomic_long_t current_version;
 
@@ -225,14 +225,18 @@ static inline void set_mm(struct mm_struct *mm, unsigned int cpu)
 
 static int __init asids_init(void)
 {
-	unsigned long old;
+	xlen_t old;
 
 	/* Figure-out number of ASID bits in HW */
 	old = csr_read(CSR_SATP);
 	asid_bits = old | (SATP_ASID_MASK << SATP_ASID_SHIFT);
 	csr_write(CSR_SATP, asid_bits);
 	asid_bits = (csr_read(CSR_SATP) >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
-	asid_bits = fls_long(asid_bits);
+#if __riscv_xlen == 64
+	asid_bits = fls64(asid_bits);
+#else
+	asid_bits = fls(asid_bits);
+#endif
 	csr_write(CSR_SATP, old);
 
 	/*
@@ -265,9 +269,9 @@ static int __init asids_init(void)
 		static_branch_enable(&use_asid_allocator);
 
 		pr_info("ASID allocator using %lu bits (%lu entries)\n",
-			asid_bits, num_asids);
+			(ulong)asid_bits, num_asids);
 	} else {
-		pr_info("ASID allocator disabled (%lu bits)\n", asid_bits);
+		pr_info("ASID allocator disabled (%lu bits)\n", (ulong)asid_bits);
 	}
 
 	return 0;
-- 
2.36.1
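
[Editor's note] For readers who have not seen the asids_init() probe before, the sketch below mirrors what the hunk above does: write all ones into the SATP ASID field, read back to see which bits the hardware actually implements (the field is WARL), and take fls of the result. This is a standalone, hedged model rather than kernel code: csr_read()/csr_write() are replaced by a plain variable, HW_ASID_BITS is an invented parameter standing in for the hardware, and fls64() is approximated with a compiler builtin.

/* Standalone sketch of the RV64 SATP ASID-width probe (not kernel code). */
#include <stdint.h>
#include <stdio.h>

#define SATP_ASID_SHIFT 44
#define SATP_ASID_MASK  0xFFFFULL      /* 16-bit ASID field in the 64-bit SATP */

/* Hypothetical: pretend the implementation wires up only this many ASID bits. */
#define HW_ASID_BITS    16

static uint64_t satp;                  /* stand-in for the SATP CSR */

static void csr_write_satp(uint64_t v)
{
	/* WARL behaviour: unimplemented ASID bits read back as zero. */
	uint64_t impl = ((1ULL << HW_ASID_BITS) - 1) << SATP_ASID_SHIFT;
	uint64_t field = SATP_ASID_MASK << SATP_ASID_SHIFT;

	satp = (v & ~field) | (v & impl);
}

static uint64_t csr_read_satp(void) { return satp; }

int main(void)
{
	uint64_t old, probe;
	unsigned int asid_bits;
	unsigned long num_asids;

	old = csr_read_satp();
	/* Set every ASID bit, then read back to see which ones stuck. */
	csr_write_satp(old | (SATP_ASID_MASK << SATP_ASID_SHIFT));
	probe = (csr_read_satp() >> SATP_ASID_SHIFT) & SATP_ASID_MASK;
	csr_write_satp(old);

	/* 64-bit fls: position of the highest set bit, 0 if none set. */
	asid_bits = probe ? 64 - (unsigned int)__builtin_clzll(probe) : 0;
	num_asids = 1UL << asid_bits;

	printf("ASID bits: %u, entries: %lu\n", asid_bits, num_asids);
	return 0;
}

With HW_ASID_BITS set to 16 this prints "ASID bits: 16, entries: 65536", which is why moving s64ilp32 to the 64-bit SATP layout gives the maximum num_asids; a 32-bit SATP would cap the probe at 9 bits.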