Le 27/06/2022 à 06:58, Anshuman Khandual a écrit : > This just converts the generic vm_get_page_prot() implementation into a new > macro i.e DECLARE_VM_GET_PAGE_PROT which later can be used across platforms > when enabling them with ARCH_HAS_VM_GET_PAGE_PROT. This does not create any > functional change. > > Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> > Cc: linux-mm@xxxxxxxxx > Cc: linux-kernel@xxxxxxxxxxxxxxx > Suggested-by: Christoph Hellwig <hch@xxxxxxxxxxxxx> > Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx> Reviewed-by: Christophe Leroy <christophe.leroy@xxxxxxxxxx> > --- > include/linux/pgtable.h | 28 ++++++++++++++++++++++++++++ > mm/mmap.c | 26 +------------------------- > 2 files changed, 29 insertions(+), 25 deletions(-) > > diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h > index 3cdc16cfd867..014ee8f0fbaa 100644 > --- a/include/linux/pgtable.h > +++ b/include/linux/pgtable.h > @@ -1689,4 +1689,32 @@ typedef unsigned int pgtbl_mod_mask; > #define MAX_PTRS_PER_P4D PTRS_PER_P4D > #endif > > +/* description of effects of mapping type and prot in current implementation. > + * this is due to the limited x86 page protection hardware.
The expected > + * behavior is in parens: > + * > + * map_type prot > + * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC > + * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes > + * w: (no) no w: (no) no w: (yes) yes w: (no) no > + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes > + * > + * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes > + * w: (no) no w: (no) no w: (copy) copy w: (no) no > + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes > + * > + * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and > + * MAP_PRIVATE (with Enhanced PAN supported): > + * r: (no) no > + * w: (no) no > + * x: (yes) yes > + */ > +#define DECLARE_VM_GET_PAGE_PROT \ > +pgprot_t vm_get_page_prot(unsigned long vm_flags) \ > +{ \ > + return protection_map[vm_flags & \ > + (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \ > +} \ > +EXPORT_SYMBOL(vm_get_page_prot); > + > #endif /* _LINUX_PGTABLE_H */ > diff --git a/mm/mmap.c b/mm/mmap.c > index b01f0280bda2..b46d5e931bb3 100644 > --- a/mm/mmap.c > +++ b/mm/mmap.c > @@ -81,26 +81,6 @@ static void unmap_region(struct mm_struct *mm, > struct vm_area_struct *vma, struct vm_area_struct *prev, > unsigned long start, unsigned long end); > > -/* description of effects of mapping type and prot in current implementation. > - * this is due to the limited x86 page protection hardware.
The expected > - * behavior is in parens: > - * > - * map_type prot > - * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC > - * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes > - * w: (no) no w: (no) no w: (yes) yes w: (no) no > - * x: (no) no x: (no) yes x: (no) yes x: (yes) yes > - * > - * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes > - * w: (no) no w: (no) no w: (copy) copy w: (no) no > - * x: (no) no x: (no) yes x: (no) yes x: (yes) yes > - * > - * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and > - * MAP_PRIVATE (with Enhanced PAN supported): > - * r: (no) no > - * w: (no) no > - * x: (yes) yes > - */ > #ifdef __P000 > pgprot_t protection_map[16] __ro_after_init = { > [VM_NONE] = __P000, > @@ -123,11 +103,7 @@ pgprot_t protection_map[16] __ro_after_init = { > #endif > > #ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT > -pgprot_t vm_get_page_prot(unsigned long vm_flags) > -{ > - return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]; > -} > -EXPORT_SYMBOL(vm_get_page_prot); > +DECLARE_VM_GET_PAGE_PROT > #endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */ > > static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)