The quilt patch titled
     Subject: mm/mmap: define DECLARE_VM_GET_PAGE_PROT
has been removed from the -mm tree.  Its filename was
     mm-mmap-define-declare_vm_get_page_prot.patch

This patch was dropped because it was merged into the mm-stable branch
of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

------------------------------------------------------
From: Anshuman Khandual <anshuman.khandual@xxxxxxx>
Subject: mm/mmap: define DECLARE_VM_GET_PAGE_PROT
Date: Mon, 11 Jul 2022 12:35:36 +0530

This just converts the generic vm_get_page_prot() implementation into a
new macro, i.e. DECLARE_VM_GET_PAGE_PROT, which platforms can later use
when they enable ARCH_HAS_VM_GET_PAGE_PROT.  This does not create any
functional change.

Link: https://lkml.kernel.org/r/20220711070600.2378316-3-anshuman.khandual@xxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
Reviewed-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx>
Suggested-by: Christoph Hellwig <hch@xxxxxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: Brian Cain <bcain@xxxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Chris Zankel <chris@xxxxxxxxxx>
Cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
Cc: Dinh Nguyen <dinguyen@xxxxxxxxxx>
Cc: Geert Uytterhoeven <geert@xxxxxxxxxxxxxx>
Cc: Guo Ren <guoren@xxxxxxxxxx>
Cc: Heiko Carstens <hca@xxxxxxxxxxxxx>
Cc: Huacai Chen <chenhuacai@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: "James E.J. Bottomley" <James.Bottomley@xxxxxxxxxxxxxxxxxxxxx>
Cc: Jeff Dike <jdike@xxxxxxxxxxx>
Cc: Jonas Bonn <jonas@xxxxxxxxxxxx>
Cc: Michael Ellerman <mpe@xxxxxxxxxxxxxx>
Cc: Michal Simek <monstr@xxxxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Cc: Palmer Dabbelt <palmer@xxxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Paul Walmsley <paul.walmsley@xxxxxxxxxx>
Cc: Richard Henderson <rth@xxxxxxxxxxx>
Cc: Rich Felker <dalias@xxxxxxxx>
Cc: Russell King <linux@xxxxxxxxxxxxxxx>
Cc: Sam Ravnborg <sam@xxxxxxxxxxxx>
Cc: Stafford Horne <shorne@xxxxxxxxx>
Cc: Thomas Bogendoerfer <tsbogend@xxxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vasily Gorbik <gor@xxxxxxxxxxxxx>
Cc: Vineet Gupta <vgupta@xxxxxxxxxx>
Cc: WANG Xuerui <kernel@xxxxxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Yoshinori Sato <ysato@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 include/linux/pgtable.h |   28 ++++++++++++++++++++++++++++
 mm/mmap.c               |   26 +-------------------------
 2 files changed, 29 insertions(+), 25 deletions(-)

--- a/include/linux/pgtable.h~mm-mmap-define-declare_vm_get_page_prot
+++ a/include/linux/pgtable.h
@@ -1689,4 +1689,32 @@ typedef unsigned int pgtbl_mod_mask;
 #define MAX_PTRS_PER_P4D PTRS_PER_P4D
 #endif
 
+/* description of effects of mapping type and prot in current implementation.
+ * this is due to the limited x86 page protection hardware.  The expected
+ * behavior is in parens:
+ *
+ * map_type	prot
+ *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
+ * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
+ *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
+ *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
+ *
+ * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
+ *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
+ *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
+ *
+ * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+ * MAP_PRIVATE (with Enhanced PAN supported):
+ *								r: (no) no
+ *								w: (no) no
+ *								x: (yes) yes
+ */
+#define DECLARE_VM_GET_PAGE_PROT					\
+pgprot_t vm_get_page_prot(unsigned long vm_flags)			\
+{									\
+		return protection_map[vm_flags &			\
+			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];	\
+}									\
+EXPORT_SYMBOL(vm_get_page_prot);
+
 #endif /* _LINUX_PGTABLE_H */
--- a/mm/mmap.c~mm-mmap-define-declare_vm_get_page_prot
+++ a/mm/mmap.c
@@ -81,26 +81,6 @@ static void unmap_region(struct mm_struc
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
 
-/* description of effects of mapping type and prot in current implementation.
- * this is due to the limited x86 page protection hardware.  The expected
- * behavior is in parens:
- *
- * map_type	prot
- *		PROT_NONE	PROT_READ	PROT_WRITE	PROT_EXEC
- * MAP_SHARED	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
- *		w: (no) no	w: (no) no	w: (yes) yes	w: (no) no
- *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
- *
- * MAP_PRIVATE	r: (no) no	r: (yes) yes	r: (no) yes	r: (no) yes
- *		w: (no) no	w: (no) no	w: (copy) copy	w: (no) no
- *		x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
- *
- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
- * MAP_PRIVATE (with Enhanced PAN supported):
- *								r: (no) no
- *								w: (no) no
- *								x: (yes) yes
- */
 #ifdef __P000
 pgprot_t protection_map[16] __ro_after_init = {
 	[VM_NONE]					= __P000,
@@ -123,11 +103,7 @@ pgprot_t protection_map[16] __ro_after_i
 #endif
 
 #ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
-{
-	return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-}
-EXPORT_SYMBOL(vm_get_page_prot);
+DECLARE_VM_GET_PAGE_PROT
#endif	/* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */
 
 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
_

Patches currently in -mm which might be from anshuman.khandual@xxxxxxx are
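
As background for the change above: an architecture that opts into
ARCH_HAS_VM_GET_PAGE_PROT is expected to carry its own protection_map[]
and can then emit the generic helper simply by dropping
DECLARE_VM_GET_PAGE_PROT into one of its mm source files.  The sketch
below is illustrative only; the arch/example/ path and the PAGE_*
initializer values are placeholders, not taken from this patch.

/* arch/example/mm/mmap.c -- illustrative sketch, not part of this patch */
#include <linux/mm.h>

/*
 * Arch-owned protection_map[], indexed by the VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED bits of vm_flags.  A real platform fills in its own PAGE_*
 * constants here (or populates the array at boot time).
 */
static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	/* ... remaining 13 combinations elided ... */
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC,
};

/*
 * Expands to the generic vm_get_page_prot() definition plus its
 * EXPORT_SYMBOL(), i.e. the code this patch removed from mm/mmap.c.
 */
DECLARE_VM_GET_PAGE_PROT

Such a platform would also select ARCH_HAS_VM_GET_PAGE_PROT in its
Kconfig, so the generic copy guarded by #ifndef
CONFIG_ARCH_HAS_VM_GET_PAGE_PROT in mm/mmap.c is compiled out.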