+Cc Mark

On Mon, 24 May 2021 at 11:26, Jisheng Zhang <Jisheng.Zhang@xxxxxxxxxxxxx> wrote:
>
> KFENCE requires the linear map to be mapped at page granularity, so
> that it is possible to protect/unprotect single pages in the KFENCE
> pool. Currently, if KFENCE is enabled, arm64 maps all pages at page
> granularity, which seems overkill. In fact, we only need to map the
> pages in the KFENCE pool itself at page granularity. We achieve this
> goal by allocating the KFENCE pool before paging_init() so that we
> know the KFENCE pool address, then we take care to map the pool at
> page granularity during map_mem().
>
> Signed-off-by: Jisheng Zhang <Jisheng.Zhang@xxxxxxxxxxxxx>
> ---
>  arch/arm64/kernel/setup.c |  3 +++
>  arch/arm64/mm/mmu.c       | 27 +++++++++++++++++++--------
>  2 files changed, 22 insertions(+), 8 deletions(-)
>
> diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
> index 61845c0821d9..51c0d6e8b67b 100644
> --- a/arch/arm64/kernel/setup.c
> +++ b/arch/arm64/kernel/setup.c
> @@ -18,6 +18,7 @@
>  #include <linux/screen_info.h>
>  #include <linux/init.h>
>  #include <linux/kexec.h>
> +#include <linux/kfence.h>
>  #include <linux/root_dev.h>
>  #include <linux/cpu.h>
>  #include <linux/interrupt.h>
> @@ -345,6 +346,8 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
>
>          arm64_memblock_init();
>
> +        kfence_alloc_pool();
> +
>          paging_init();
>
>          acpi_table_upgrade();
> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
> index 89b66ef43a0f..12712d31a054 100644
> --- a/arch/arm64/mm/mmu.c
> +++ b/arch/arm64/mm/mmu.c
> @@ -13,6 +13,7 @@
>  #include <linux/init.h>
>  #include <linux/ioport.h>
>  #include <linux/kexec.h>
> +#include <linux/kfence.h>
>  #include <linux/libfdt.h>
>  #include <linux/mman.h>
>  #include <linux/nodemask.h>
> @@ -515,10 +516,16 @@ static void __init map_mem(pgd_t *pgdp)
>           */
>          BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
>
> -        if (rodata_full || crash_mem_map || debug_pagealloc_enabled() ||
> -            IS_ENABLED(CONFIG_KFENCE))
> +        if (rodata_full || crash_mem_map || debug_pagealloc_enabled())
>                  flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>
> +        /*
> +         * KFENCE requires linear map to be mapped at page granularity, so
> +         * temporarily skip mapping for __kfence_pool in the following
> +         * for-loop
> +         */
> +        memblock_mark_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
> +

Did you build this with CONFIG_KFENCE unset? I don't think it builds.
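With CONFIG_KFENCE=n, <linux/kfence.h> doesn't declare __kfence_pool or
define KFENCE_POOL_SIZE, so these unconditional references (and the
__map_memblock() call further down) shouldn't compile. One option might
be a small #ifdef'd helper along these lines (untested sketch; the
helper name is my invention, and the NULL check accounts for
kfence_alloc_pool() failing to allocate the pool):

#ifdef CONFIG_KFENCE
/* Mark the KFENCE pool nomap so the mapping loop below skips it. */
static void __init kfence_pool_mark_nomap(void)
{
        /* kfence_alloc_pool() may have failed; don't mark a NULL pool. */
        if (__kfence_pool)
                memblock_mark_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
}
#else
static void __init kfence_pool_mark_nomap(void)
{
}
#endif

The page-granular __map_memblock() / memblock_clear_nomap() pair later
in map_mem() would need the same treatment.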
>          /*
>           * Take care not to create a writable alias for the
>           * read-only text and rodata sections of the kernel image.
> @@ -553,6 +560,15 @@ static void __init map_mem(pgd_t *pgdp)
>          __map_memblock(pgdp, kernel_start, kernel_end,
>                         PAGE_KERNEL, NO_CONT_MAPPINGS);
>          memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
> +
> +        /*
> +         * Map the __kfence_pool at page granularity now.
> +         */
> +        __map_memblock(pgdp, __pa(__kfence_pool),
> +                       __pa(__kfence_pool + KFENCE_POOL_SIZE),
> +                       pgprot_tagged(PAGE_KERNEL),
> +                       NO_EXEC_MAPPINGS | NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
> +        memblock_clear_nomap(__pa(__kfence_pool), KFENCE_POOL_SIZE);
>  }
>
>  void mark_rodata_ro(void)
> @@ -1480,12 +1496,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
>
>          VM_BUG_ON(!mhp_range_allowed(start, size, true));
>
> -        /*
> -         * KFENCE requires linear map to be mapped at page granularity, so that
> -         * it is possible to protect/unprotect single pages in the KFENCE pool.
> -         */
> -        if (rodata_full || debug_pagealloc_enabled() ||
> -            IS_ENABLED(CONFIG_KFENCE))
> +        if (rodata_full || debug_pagealloc_enabled())
>                  flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
>
>          __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
> --
> 2.31.0