From: Christoph Hellwig <hch@xxxxxx>
Subject: mm: enforce that vmap can't map pages executable

To help enforce the W^X protection, don't allow remapping existing pages
as executable.

x86 bits from Peter Zijlstra, arm64 bits from Mark Rutland.

Link: http://lkml.kernel.org/r/20200414131348.444715-20-hch@xxxxxx
Signed-off-by: Christoph Hellwig <hch@xxxxxx>
Acked-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Christian Borntraeger <borntraeger@xxxxxxxxxx>
Cc: Christophe Leroy <christophe.leroy@xxxxxx>
Cc: Daniel Vetter <daniel@xxxxxxxx>
Cc: Daniel Vetter <daniel.vetter@xxxxxxxx>
Cc: David Airlie <airlied@xxxxxxxx>
Cc: Gao Xiang <xiang@xxxxxxxxxx>
Cc: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
Cc: Haiyang Zhang <haiyangz@xxxxxxxxxxxxx>
Cc: Johannes Weiner <hannes@xxxxxxxxxxx>
Cc: "K. Y. Srinivasan" <kys@xxxxxxxxxxxxx>
Cc: Laura Abbott <labbott@xxxxxxxxxx>
Cc: Michael Kelley <mikelley@xxxxxxxxxxxxx>
Cc: Minchan Kim <minchan@xxxxxxxxxx>
Cc: Nitin Gupta <ngupta@xxxxxxxxxx>
Cc: Robin Murphy <robin.murphy@xxxxxxx>
Cc: Sakari Ailus <sakari.ailus@xxxxxxxxxxxxxxx>
Cc: Stephen Hemminger <sthemmin@xxxxxxxxxxxxx>
Cc: Sumit Semwal <sumit.semwal@xxxxxxxxxx>
Cc: Wei Liu <wei.liu@xxxxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Heiko Carstens <heiko.carstens@xxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxxx>
Cc: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/arm64/include/asm/pgtable.h     |    3 +++
 arch/x86/include/asm/pgtable_types.h |    6 ++++++
 include/asm-generic/pgtable.h        |    4 ++++
 mm/vmalloc.c                         |    2 +-
 4 files changed, 14 insertions(+), 1 deletion(-)

--- a/arch/arm64/include/asm/pgtable.h~mm-enforce-that-vmap-cant-map-pages-executable
+++ a/arch/arm64/include/asm/pgtable.h
@@ -407,6 +407,9 @@ static inline pmd_t pmd_mkdevmap(pmd_t p
 #define __pgprot_modify(prot,mask,bits) \
 	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))
 
+#define pgprot_nx(prot) \
+	__pgprot_modify(prot, 0, PTE_PXN)
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
--- a/arch/x86/include/asm/pgtable_types.h~mm-enforce-that-vmap-cant-map-pages-executable
+++ a/arch/x86/include/asm/pgtable_types.h
@@ -282,6 +282,12 @@ typedef struct pgprot { pgprotval_t pgpr
 
 typedef struct { pgdval_t pgd; } pgd_t;
 
+static inline pgprot_t pgprot_nx(pgprot_t prot)
+{
+	return __pgprot(pgprot_val(prot) | _PAGE_NX);
+}
+#define pgprot_nx pgprot_nx
+
 #ifdef CONFIG_X86_PAE
 
 /*
--- a/include/asm-generic/pgtable.h~mm-enforce-that-vmap-cant-map-pages-executable
+++ a/include/asm-generic/pgtable.h
@@ -491,6 +491,10 @@ static inline int arch_unmap_one(struct
 #define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
 #endif
 
+#ifndef pgprot_nx
+#define pgprot_nx(prot)	(prot)
+#endif
+
 #ifndef pgprot_noncached
 #define pgprot_noncached(prot)	(prot)
 #endif
--- a/mm/vmalloc.c~mm-enforce-that-vmap-cant-map-pages-executable
+++ a/mm/vmalloc.c
@@ -2391,7 +2391,7 @@ void *vmap(struct page **pages, unsigned
 	if (!area)
 		return NULL;
 
-	if (map_kernel_range((unsigned long)area->addr, size, prot,
+	if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot),
 			pages) < 0) {
 		vunmap(area->addr);
 		return NULL;
_
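
What this means for a vmap() caller, as a rough sketch (not taken from the
patch; the helper name demo_map_page() is made up, while vmap(), VM_MAP,
PAGE_KERNEL_EXEC, alloc_page() and __free_page() are existing kernel APIs):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical caller, for illustration only. */
static void *demo_map_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *addr;

	if (!page)
		return NULL;
	/*
	 * The caller asks for an executable mapping, but vmap() now
	 * filters the prot through pgprot_nx(), so on arm64 (PTE_PXN)
	 * and x86 (_PAGE_NX) the page ends up mapped non-executable.
	 */
	addr = vmap(&page, 1, VM_MAP, PAGE_KERNEL_EXEC);
	if (!addr)
		__free_page(page);
	return addr;
}

Code that genuinely needs executable mappings presumably has to keep using
the dedicated interfaces (e.g. the architecture's module_alloc()) rather
than remapping pages through vmap().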