Signed-off-by: Lev Olshvang <levonshe@xxxxxxxxx>
---
 arch/um/include/asm/mmu_context.h | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index b4deb1bfbb68..2de21d52bd60 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* 
+/*
  * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  */
 
@@ -28,6 +28,11 @@ static inline void arch_unmap(struct mm_struct *mm,
 static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
 		bool write, bool execute, bool foreign)
 {
+#ifdef CONFIG_PROTECT_READONLY_USER_MEMORY
+	/* Forbid writes to PROT_READ pages of a foreign process */
+	if (write && foreign && !(vma->vm_flags & VM_WRITE))
+		return false;
+#endif
 	/* by default, allow everything */
 	return true;
 }
@@ -52,7 +57,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	up_write(&new->mmap_sem);
 }
 
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
 	unsigned cpu = smp_processor_id();
@@ -65,7 +70,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	}
 }
 
-static inline void enter_lazy_tlb(struct mm_struct *mm, 
+static inline void enter_lazy_tlb(struct mm_struct *mm,
 				  struct task_struct *tsk)
 {
 }
-- 
2.17.1