This defines and exports a platform-specific custom vm_get_page_prot() by
selecting ARCH_HAS_VM_GET_PAGE_PROT. Subsequently, all the __SXXX and __PXXX
macros, which are no longer needed, can be dropped.

Cc: Richard Henderson <rth@xxxxxxxxxxx>
Cc: linux-alpha@xxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
Signed-off-by: Anshuman Khandual <anshuman.khandual@xxxxxxx>
---
 arch/alpha/Kconfig               |  1 +
 arch/alpha/include/asm/pgtable.h | 17 -----------------
 arch/alpha/mm/init.c             | 41 ++++++++++++++++++++++++++++++++
 3 files changed, 42 insertions(+), 17 deletions(-)

diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 4e87783c90ad..73e82fe5c770 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -2,6 +2,7 @@
 config ALPHA
 	bool
 	default y
+	select ARCH_HAS_VM_GET_PAGE_PROT
 	select ARCH_32BIT_USTAT_F_TINODE
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 02f0429f1068..9fb5e9d10bb6 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -116,23 +116,6 @@ struct vm_area_struct;
  * arch/alpha/mm/fault.c)
  */
 	/* xwr */
-#define __P000	_PAGE_P(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __P001	_PAGE_P(_PAGE_FOE | _PAGE_FOW)
-#define __P010	_PAGE_P(_PAGE_FOE)
-#define __P011	_PAGE_P(_PAGE_FOE)
-#define __P100	_PAGE_P(_PAGE_FOW | _PAGE_FOR)
-#define __P101	_PAGE_P(_PAGE_FOW)
-#define __P110	_PAGE_P(0)
-#define __P111	_PAGE_P(0)
-
-#define __S000	_PAGE_S(_PAGE_FOE | _PAGE_FOW | _PAGE_FOR)
-#define __S001	_PAGE_S(_PAGE_FOE | _PAGE_FOW)
-#define __S010	_PAGE_S(_PAGE_FOE)
-#define __S011	_PAGE_S(_PAGE_FOE)
-#define __S100	_PAGE_S(_PAGE_FOW | _PAGE_FOR)
-#define __S101	_PAGE_S(_PAGE_FOW)
-#define __S110	_PAGE_S(0)
-#define __S111	_PAGE_S(0)
 
 /*
  * pgprot_noncached() is only for infiniband pci support, and a real
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index f6114d03357c..89e5e593194d 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -280,3 +280,44 @@ mem_init(void)
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 	memblock_free_all();
 }
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+	switch (vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)) {
+	case VM_NONE:
+		return _PAGE_P(_PAGE_FOE|_PAGE_FOW|_PAGE_FOR);
+	case VM_READ:
+		return _PAGE_P(_PAGE_FOE|_PAGE_FOW);
+	case VM_WRITE:
+		return _PAGE_P(_PAGE_FOE);
+	case VM_READ | VM_WRITE:
+		return _PAGE_P(_PAGE_FOE);
+	case VM_EXEC:
+		return _PAGE_P(_PAGE_FOW|_PAGE_FOR);
+	case VM_EXEC | VM_READ:
+		return _PAGE_P(_PAGE_FOW);
+	case VM_EXEC | VM_WRITE:
+		return _PAGE_P(0);
+	case VM_EXEC | VM_READ | VM_WRITE:
+		return _PAGE_P(0);
+	case VM_SHARED:
+		return _PAGE_S(_PAGE_FOE|_PAGE_FOW|_PAGE_FOR);
+	case VM_SHARED | VM_READ:
+		return _PAGE_S(_PAGE_FOE|_PAGE_FOW);
+	case VM_SHARED | VM_WRITE:
+		return _PAGE_S(_PAGE_FOE);
+	case VM_SHARED | VM_READ | VM_WRITE:
+		return _PAGE_S(_PAGE_FOE);
+	case VM_SHARED | VM_EXEC:
+		return _PAGE_S(_PAGE_FOW|_PAGE_FOR);
+	case VM_SHARED | VM_EXEC | VM_READ:
+		return _PAGE_S(_PAGE_FOW);
+	case VM_SHARED | VM_EXEC | VM_WRITE:
+		return _PAGE_S(0);
+	case VM_SHARED | VM_EXEC | VM_READ | VM_WRITE:
+		return _PAGE_S(0);
+	default:
+		BUILD_BUG();
+	}
+}
+EXPORT_SYMBOL(vm_get_page_prot);
-- 
2.25.1
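
For reference (not part of this patch): generic mm code consumes
vm_get_page_prot() whenever a VMA's page protection is derived from its
vm_flags. A minimal sketch of such a caller is below; the wrapper name
example_set_vma_prot() is made up purely for illustration, only the
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags) pattern is the point.
With this patch applied, that call resolves to the switch statement added
in arch/alpha/mm/init.c rather than the generic lookup built from the
__P/__S macros.

	#include <linux/mm.h>

	/* Illustrative only: recompute a VMA's protection from its flags. */
	static void example_set_vma_prot(struct vm_area_struct *vma)
	{
		/* On alpha this now dispatches to the new switch above. */
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}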