This is a note to let you know that I've just added the patch titled

    powerpc/64e: Fix early TLB miss with KUAP

to the 5.18-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     powerpc-64e-fix-early-tlb-miss-with-kuap.patch
and it can be found in the queue-5.18 subdirectory.

If you, or anyone else, feels it should not be added to the stable
tree, please let <stable@xxxxxxxxxxxxxxx> know about it.


>From 09317643117ade87c03158341e87466413fa8f1a Mon Sep 17 00:00:00 2001
From: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Date: Tue, 28 Jun 2022 16:48:54 +0200
Subject: powerpc/64e: Fix early TLB miss with KUAP

From: Christophe Leroy <christophe.leroy@xxxxxxxxxx>

commit 09317643117ade87c03158341e87466413fa8f1a upstream.

With KUAP, the TLB miss handler bails out when an access to user memory
is performed with a nul TID.

But the normal TLB miss routine, which is only used early during boot,
does the check regardless for all memory areas, not only user memory.

By chance there is no early IO or vmalloc access, but when KASAN comes
we will start having early TLB misses.

Fix it by creating a special branch for user accesses similar to the
one in the 'bolted' TLB miss handlers. Unfortunately SPRN_MAS1 is now
read too early and there are no registers available to preserve it, so
it will be read a second time.

Fixes: 57bc963837f5 ("powerpc/kuap: Wire-up KUAP on book3e/64")
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Signed-off-by: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/8d6c5859a45935d6e1a336da4dc20be421e8cea7.1656427701.git.christophe.leroy@xxxxxxxxxx
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/powerpc/mm/nohash/tlb_low_64e.S |   17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 8b97c4acfebf..9e9ab3803fb2 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -583,7 +583,7 @@ itlb_miss_fault_e6500:
 	 */
 	rlwimi	r11,r14,32-19,27,27
 	rlwimi	r11,r14,32-16,19,19
-	beq	normal_tlb_miss
+	beq	normal_tlb_miss_user
 	/* XXX replace the RMW cycles with immediate loads + writes */
 1:	mfspr	r10,SPRN_MAS1
 	cmpldi	cr0,r15,8		/* Check for vmalloc region */
@@ -626,7 +626,7 @@ itlb_miss_fault_e6500:
 
 	cmpldi	cr0,r15,0			/* Check for user region */
 	std	r14,EX_TLB_ESR(r12)	/* write crazy -1 to frame */
-	beq	normal_tlb_miss
+	beq	normal_tlb_miss_user
 
 	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
 	oris	r11,r11,_PAGE_ACCESSED@h
@@ -653,6 +653,12 @@ itlb_miss_fault_e6500:
  * r11 = PTE permission mask
  * r10 = crap (free to use)
  */
+normal_tlb_miss_user:
+#ifdef CONFIG_PPC_KUAP
+	mfspr	r14,SPRN_MAS1
+	rlwinm.	r14,r14,0,0x3fff0000
+	beq-	normal_tlb_miss_access_fault /* KUAP fault */
+#endif
 normal_tlb_miss:
 	/* So we first construct the page table address. We do that by
 	 * shifting the bottom of the address (not the region ID) by
@@ -683,11 +689,6 @@ finish_normal_tlb_miss:
 	/* Check if required permissions are met */
 	andc.	r15,r11,r14
 	bne-	normal_tlb_miss_access_fault
-#ifdef CONFIG_PPC_KUAP
-	mfspr	r11,SPRN_MAS1
-	rlwinm.	r10,r11,0,0x3fff0000
-	beq-	normal_tlb_miss_access_fault /* KUAP fault */
-#endif
 
 	/* Now we build the MAS:
 	 *
@@ -709,9 +710,7 @@ finish_normal_tlb_miss:
 	rldicl	r10,r14,64-8,64-8
 	cmpldi	cr0,r10,BOOK3E_PAGESZ_4K
 	beq-	1f
-#ifndef CONFIG_PPC_KUAP
 	mfspr	r11,SPRN_MAS1
-#endif
 	rlwimi	r11,r14,31,21,24
 	rlwinm	r11,r11,0,21,19
 	mtspr	SPRN_MAS1,r11
-- 
2.37.1



Patches currently in stable-queue which might be from christophe.leroy@xxxxxxxxxx are

queue-5.18/powerpc-restore-config_debug_info-in-defconfigs.patch
queue-5.18/powerpc-ptdump-fix-display-of-rw-pages-on-fsl_book3e.patch
queue-5.18/powerpc-64e-fix-early-tlb-miss-with-kuap.patch
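
For readers who want a quick orientation without following the Book3E
assembly: the new normal_tlb_miss_user stub isolates the TID field of
MAS1 (the 0x3fff0000 mask tested by the rlwinm. instruction) and treats
a zero TID as a KUAP fault, while kernel-region misses skip straight to
normal_tlb_miss and are no longer rejected during early boot. Below is
a minimal, purely illustrative C sketch of that check; MAS1_TID_FIELD
and the helper names are assumptions made up for the example, not
identifiers from the kernel source, and the real code of course tests
the SPR value read by mfspr.

/*
 * Illustrative sketch only (not kernel code): C-level equivalent of the
 * check performed by the new normal_tlb_miss_user stub.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mask tested by "rlwinm. r14,r14,0,0x3fff0000": the TID field of MAS1. */
#define MAS1_TID_FIELD 0x3fff0000u

/*
 * With KUAP, a zero TID means user access is currently locked out, so a
 * user-region TLB miss must be reported as an access fault instead of
 * being serviced.
 */
static bool kuap_rejects_user_access(uint32_t mas1)
{
	return (mas1 & MAS1_TID_FIELD) == 0;
}

int main(void)
{
	const uint32_t mas1_kuap_locked = 0x00000000u; /* TID == 0 */
	const uint32_t mas1_kuap_open   = 0x00010000u; /* TID != 0 */

	printf("TID==0 -> %s\n", kuap_rejects_user_access(mas1_kuap_locked)
	       ? "branch to normal_tlb_miss_access_fault"
	       : "fall through to normal_tlb_miss");
	printf("TID!=0 -> %s\n", kuap_rejects_user_access(mas1_kuap_open)
	       ? "branch to normal_tlb_miss_access_fault"
	       : "fall through to normal_tlb_miss");
	return 0;
}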