From: Will Deacon <will.deacon@xxxxxxx>

commit fc0e1299da54 upstream.

In order for code such as TLB invalidation to operate efficiently when
the decision to map the kernel at EL0 is determined at runtime, this
patch introduces a helper function, arm64_kernel_unmapped_at_el0, to
determine whether or not the kernel is mapped whilst running in
userspace.

Currently, this just reports the value of CONFIG_UNMAP_KERNEL_AT_EL0,
but will later be hooked up to a fake CPU capability using a static
key.

Reviewed-by: Mark Rutland <mark.rutland@xxxxxxx>
Tested-by: Laura Abbott <labbott@xxxxxxxxxx>
Tested-by: Shanker Donthineni <shankerd@xxxxxxxxxxxxxx>
Signed-off-by: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Alex Shi <alex.shi@xxxxxxxxxx> [v4.9 backport]
Signed-off-by: Mark Rutland <mark.rutland@xxxxxxx> [v4.9 backport]
---
 arch/arm64/include/asm/mmu.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 49924e56048e..279e75b8a49e 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -18,6 +18,8 @@
 
 #define USER_ASID_FLAG	(UL(1) << 48)
 
+#ifndef __ASSEMBLY__
+
 typedef struct {
 	atomic64_t	id;
 	void		*vdso;
@@ -30,6 +32,11 @@ typedef struct {
  */
 #define ASID(mm)	((mm)->context.id.counter & 0xffff)
 
+static inline bool arm64_kernel_unmapped_at_el0(void)
+{
+	return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0);
+}
+
 extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
@@ -39,4 +46,5 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       pgprot_t prot, bool allow_block_mappings);
 extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
 
+#endif	/* !__ASSEMBLY__ */
 #endif
-- 
2.11.0
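
For context, below is a small standalone C sketch (not part of the patch, and
written as plain userspace code so it compiles on its own) of the pattern the
helper enables: callers branch on a boolean that reflects a compile-time
option, so the compiler can discard the unused path when the option is off.
SKETCH_UNMAP_KERNEL_AT_EL0 stands in for IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0),
and flush_tlb_page_sketch() is a hypothetical caller, not code from this series.

/*
 * Standalone sketch, not kernel code: illustrates how TLB invalidation
 * code might branch on a helper like arm64_kernel_unmapped_at_el0().
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0); toggle to 0. */
#define SKETCH_UNMAP_KERNEL_AT_EL0 1

static inline bool kernel_unmapped_at_el0(void)
{
	/* Constant for now; the real helper is later driven by a static key. */
	return SKETCH_UNMAP_KERNEL_AT_EL0;
}

static void flush_tlb_page_sketch(unsigned long addr)
{
	if (kernel_unmapped_at_el0())
		printf("invalidate %#lx for both kernel and user ASIDs\n", addr);
	else
		printf("invalidate %#lx once\n", addr);
}

int main(void)
{
	flush_tlb_page_sketch(0x12345000UL);
	return 0;
}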