Serge reported some crashes with CONFIG_STRICT_KERNEL_RWX enabled
on a book3s32 machine.

Analysis shows two issues:
- BAT addresses and sizes are not properly aligned.
- There is a gap between the last address covered by BATs and the
  first address covered by pages.

Memory mapped with DBATs:
0: 0xc0000000-0xc07fffff 0x00000000 Kernel RO coherent
1: 0xc0800000-0xc0bfffff 0x00800000 Kernel RO coherent
2: 0xc0c00000-0xc13fffff 0x00c00000 Kernel RW coherent
3: 0xc1400000-0xc23fffff 0x01400000 Kernel RW coherent
4: 0xc2400000-0xc43fffff 0x02400000 Kernel RW coherent
5: 0xc4400000-0xc83fffff 0x04400000 Kernel RW coherent
6: 0xc8400000-0xd03fffff 0x08400000 Kernel RW coherent
7: 0xd0400000-0xe03fffff 0x10400000 Kernel RW coherent

Memory mapped with pages:
0xe1000000-0xefffffff 0x21000000 240M rw present dirty accessed

This patch fixes both issues. With the patch, we get the following,
which is as expected:

Memory mapped with DBATs:
0: 0xc0000000-0xc07fffff 0x00000000 Kernel RO coherent
1: 0xc0800000-0xc0bfffff 0x00800000 Kernel RO coherent
2: 0xc0c00000-0xc0ffffff 0x00c00000 Kernel RW coherent
3: 0xc1000000-0xc1ffffff 0x01000000 Kernel RW coherent
4: 0xc2000000-0xc3ffffff 0x02000000 Kernel RW coherent
5: 0xc4000000-0xc7ffffff 0x04000000 Kernel RW coherent
6: 0xc8000000-0xcfffffff 0x08000000 Kernel RW coherent
7: 0xd0000000-0xdfffffff 0x10000000 Kernel RW coherent

Memory mapped with pages:
0xe0000000-0xefffffff 0x20000000 256M rw present dirty accessed

Reported-by: Serge Belyshev <belyshev@xxxxxxxxxxxxxxxxx>
Fixes: 63b2bc619565 ("powerpc/mm/32s: Use BATs for STRICT_KERNEL_RWX")
Cc: stable@xxxxxxxxxxxxxxx
Signed-off-by: Christophe Leroy <christophe.leroy@xxxxxx>
---
 arch/powerpc/mm/ppc_mmu_32.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index bf1de3ca39bc..37cf2af98f6a 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -101,7 +101,7 @@ static int find_free_bat(void)
 static unsigned int block_size(unsigned long base, unsigned long top)
 {
 	unsigned int max_size = (cpu_has_feature(CPU_FTR_601) ? 8 : 256) << 20;
-	unsigned int base_shift = (fls(base) - 1) & 31;
+	unsigned int base_shift = (ffs(base) - 1) & 31;
 	unsigned int block_shift = (fls(top - base) - 1) & 31;
 
 	return min3(max_size, 1U << base_shift, 1U << block_shift);
@@ -157,7 +157,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
 
 unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 {
-	int done;
+	unsigned long done;
 	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
 
 	if (__map_without_bats) {
@@ -169,10 +169,10 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 		return __mmu_mapin_ram(base, top);
 
 	done = __mmu_mapin_ram(base, border);
-	if (done != border - base)
+	if (done != border)
 		return done;
 
-	return done + __mmu_mapin_ram(border, top);
+	return __mmu_mapin_ram(border, top);
 }
 
 void mmu_mark_initmem_nx(void)
-- 
2.13.3
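
For reference, below is a minimal user-space sketch of the alignment rule the
first hunk enforces: a BAT block must be naturally aligned, so the largest
block that may start at a given base is bounded by the lowest set bit of the
base (ffs), not the highest (fls). This is only an illustration, not the
kernel code: block_size_demo() and fls_demo() are stand-ins for the kernel's
block_size() and fls(), POSIX ffs() replaces the kernel helper, the 256MB cap
assumes the non-601 case, and the base/top values are taken from the example
mapping above (top is only assumed, it just needs to be large).

/* Standalone demo: why block_size() must use ffs() on the base address. */
#include <stdio.h>
#include <strings.h>	/* POSIX ffs(): 1-based index of lowest set bit */

/* 1-based index of the highest set bit, mirroring the kernel's fls() */
static int fls_demo(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int block_size_demo(unsigned long base, unsigned long top)
{
	unsigned int max_size = 256 << 20;	/* non-601 case */
	/* Largest naturally aligned block starting at base */
	unsigned int base_shift = (ffs(base) - 1) & 31;
	/* Largest power-of-two block fitting below top */
	unsigned int block_shift = (fls_demo(top - base) - 1) & 31;
	unsigned int size = max_size;

	if ((1U << base_shift) < size)
		size = 1U << base_shift;
	if ((1U << block_shift) < size)
		size = 1U << block_shift;
	return size;
}

int main(void)
{
	/* Physical offset where RW BAT mapping resumes in the example: 0x00c00000 */
	unsigned long base = 0x00c00000, top = 0x30000000;

	/* Prints 4 MB, matching BAT 2 (0xc0c00000-0xc0ffffff) in the fixed map.
	 * Using fls(base) instead would claim 8 MB, but 0x00c00000 is only
	 * 4 MB aligned, giving the misaligned BAT 2 seen before the patch. */
	printf("block at %#lx: %u MB\n", base, block_size_demo(base, top) >> 20);

	return 0;
}

The remaining hunks suggest that __mmu_mapin_ram() returns the top address it
reached rather than a byte count, which is why `done` becomes unsigned long,
the check compares against `border` instead of `border - base`, and the final
`done +` is dropped; together this removes the gap between the BAT-mapped and
page-mapped regions.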