From: James Hogan <james.hogan@xxxxxxxxxx>

Performing an MTHC0 instruction without XPA being present will trigger
a reserved instruction exception, therefore conditionalise the use of
this instruction when building TLB handlers (build_update_entries()),
and in __update_tlb().

This allows an XPA kernel to run on non XPA hardware without that
instruction implemented, just like it can run on XPA capable hardware
without XPA in use (with the noxpa kernel argument) or with XPA not
configured in hardware.

[paul.burton@xxxxxxxxxx:
  - Rebase atop other TLB work.
  - Add "mm" to subject.
  - Handle the __kmap_pgprot case.]

Fixes: c5b367835cfc ("MIPS: Add support for XPA.")
Signed-off-by: James Hogan <james.hogan@xxxxxxxxxx>
Signed-off-by: Paul Burton <paul.burton@xxxxxxxxxx>
Cc: Ralf Baechle <ralf@xxxxxxxxxxxxxx>
Cc: linux-mips@xxxxxxxxxxxxxx
---
 arch/mips/mm/init.c    | 12 +++++++-----
 arch/mips/mm/tlb-r4k.c |  6 ++++--
 arch/mips/mm/tlbex.c   | 16 ++++++++++------
 3 files changed, 21 insertions(+), 13 deletions(-)

diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 0e57893..1588409 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -111,11 +111,13 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 	write_c0_entryhi(vaddr & (PAGE_MASK << 1));
 	write_c0_entrylo0(entrylo);
 	write_c0_entrylo1(entrylo);
-#ifdef CONFIG_XPA
-	entrylo = (pte.pte_low & _PFNX_MASK);
-	writex_c0_entrylo0(entrylo);
-	writex_c0_entrylo1(entrylo);
-#endif
+
+	if (config_enabled(CONFIG_XPA) && cpu_has_xpa) {
+		entrylo = (pte.pte_low & _PFNX_MASK);
+		writex_c0_entrylo0(entrylo);
+		writex_c0_entrylo1(entrylo);
+	}
+
 	tlbidx = read_c0_wired();
 	write_c0_wired(tlbidx + 1);
 	write_c0_index(tlbidx);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c17d762..b99695c 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -336,10 +336,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
 		write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
 		ptep++;
 		write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
-		writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+		if (cpu_has_xpa)
+			writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
 #else
 		write_c0_entrylo0(ptep->pte_high);
 		ptep++;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 45234ad..4b41859 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1023,17 +1023,21 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
 
-		uasm_i_lw(p, tmp, 0, ptep);
-		uasm_i_ext(p, tmp, tmp, 0, 24);
-		uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, 0, ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+		}
 
 		uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
 		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
 		UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
 
-		uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
-		uasm_i_ext(p, tmp, tmp, 0, 24);
-		uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+		if (cpu_has_xpa && !mips_xpa_disabled) {
+			uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
+			uasm_i_ext(p, tmp, tmp, 0, 24);
+			uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+		}
 
 		return;
 	}
-- 
2.8.0
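
For context, a minimal userspace sketch of the compile-time plus runtime
gating pattern the patch applies; every name in it is a hypothetical
stand-in, not a kernel API. HAVE_WIDE_REGS stands in for CONFIG_XPA,
cpu_supports_wide_regs() for cpu_has_xpa, and write_wide_reg() for the
MTHC0-backed writex_c0_entrylo* accessors that would fault on cores
without XPA:

/*
 * Hypothetical sketch only: the extended write path is taken only when
 * the feature is both built in and reported by the running CPU, so a
 * kernel built with the option still runs on hardware that lacks it.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HAVE_WIDE_REGS 1		/* compile-time option, like CONFIG_XPA */

static bool cpu_supports_wide_regs(void)
{
	return false;			/* pretend the running CPU lacks the feature */
}

static void write_reg(uint32_t lo)
{
	printf("lo = 0x%08x\n", lo);	/* always-safe write, like write_c0_entrylo* */
}

static void write_wide_reg(uint32_t hi)
{
	printf("hi = 0x%08x\n", hi);	/* extended write, like writex_c0_entrylo* */
}

static void update_entry(uint64_t pa)
{
	write_reg((uint32_t)pa);

	/* Take the extended path only if both the build and the CPU allow it. */
	if (HAVE_WIDE_REGS && cpu_supports_wide_regs())
		write_wide_reg((uint32_t)(pa >> 32));
}

int main(void)
{
	update_entry(0xabc12345678ULL);	/* a >32-bit physical address */
	return 0;
}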