CP0 Context has enough room for wrapping the pgd pointer into its 41-bit
PTEBase field.

+. For XKPHYS, the trick is that the pgd is 4kB aligned and PABITS <= 48,
   so only 48 - 12 + 5 (for bit[63:59]) = 41 bits need to be saved, i.e.:
   bit[63:59] | 0000 0000 000 | bit[47:12] | 0000 0000 0000

+. For CKSEG0, only 29 - 12 = 17 bits need to be saved.

+. Use CAC_BASE to access bit[63:59] of the pgd.

+. Let CONFIG_MIPS_PGD_C0_CONTEXT depend on 64BIT, and protect
   build_fast_tlb_refill_handler from 32-bit builds.

Signed-off-by: Huang Pei <huangpei@xxxxxxxxxxx>
---
 arch/mips/Kconfig    |  1 +
 arch/mips/mm/tlbex.c | 11 ++++++-----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2000bb2b0220..517509ad8596 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2142,6 +2142,7 @@ config CPU_SUPPORTS_HUGEPAGES
 	depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA))
 config MIPS_PGD_C0_CONTEXT
 	bool
+	depends on 64BIT
 	default y if 64BIT && (CPU_MIPSR2 || CPU_MIPSR6) && !CPU_XLP
 
 #
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index a7521b8f7658..5bb9724578f7 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -848,8 +848,8 @@ void build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
 		/* Clear lower 23 bits of context. */
 		uasm_i_dins(p, ptr, 0, 0, 23);
 
-		/* 1 0  1 0 1  << 6  xkphys cached */
-		uasm_i_ori(p, ptr, ptr, 0x540);
+		/* insert bit[63:59] of CAC_BASE into bit[10:6] of ptr */
+		uasm_i_ori(p, ptr, ptr, (CAC_BASE >> 53));
 		uasm_i_drotr(p, ptr, ptr, 11);
 #elif defined(CONFIG_SMP)
 		UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
@@ -1106,6 +1106,7 @@ struct mips_huge_tlb_info {
 	bool need_reload_pte;
 };
 
+#ifdef CONFIG_64BIT
 static struct mips_huge_tlb_info
 build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 			       struct uasm_reloc **r, unsigned int tmp,
@@ -1164,8 +1165,8 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 	if (pgd_reg == -1) {
 		vmalloc_branch_delay_filled = 1;
 
-		/* 1 0  1 0 1  << 6  xkphys cached */
-		uasm_i_ori(p, ptr, ptr, 0x540);
+		/* insert bit[63:59] of CAC_BASE into bit[10:6] of ptr */
+		uasm_i_ori(p, ptr, ptr, (CAC_BASE >> 53));
 		uasm_i_drotr(p, ptr, ptr, 11);
 	}
 
@@ -1292,7 +1293,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
 
 	return rv;
 }
-
+#endif
 /*
  * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
  * because EXL == 0.  If we wrap, we can also use the 32 instruction
-- 
2.25.1
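
The wrap/unwrap arithmetic above can be sanity-checked outside the kernel.
Below is a minimal user-space sketch, not kernel code: the CAC_BASE value,
the example pgd, and the assumption that the setup side leaves pgd << 11 in
the PTEBase bits of CP0 Context are illustrative assumptions; the three
steps mirror the dins/ori/drotr sequence emitted by the refill handler.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define CAC_BASE	0xa800000000000000ULL	/* assumed cached XKPHYS base */

static uint64_t rotr64(uint64_t x, unsigned int n)
{
	return (x >> n) | (x << (64 - n));
}

int main(void)
{
	/* hypothetical pgd: cached XKPHYS, 4kB aligned, physical bits < 2^48 */
	uint64_t pgd = CAC_BASE | 0x0000123456789000ULL;

	/* assumed setup-side value: pgd << 11 stored in Context/PTEBase */
	uint64_t context = pgd << 11;

	/* refill-handler reconstruction, mirroring the uasm sequence */
	uint64_t ptr = context;
	ptr &= ~((1ULL << 23) - 1);	/* dins: clear bit[22:0] (BadVPN2 area) */
	ptr |= CAC_BASE >> 53;		/* ori:  bit[63:59] of CAC_BASE -> bit[10:6] */
	ptr = rotr64(ptr, 11);		/* drotr 11: rotate them back to bit[63:59] */

	assert(ptr == pgd);
	printf("pgd %#llx recovered as %#llx\n",
	       (unsigned long long)pgd, (unsigned long long)ptr);
	return 0;
}

With these assumptions the assert holds for any cached XKPHYS pgd whose
physical address fits in 48 bits, which is exactly the 41-bit budget
computed in the commit message.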