From: Jeff Xu <jeffxu@xxxxxxxxxxxx>

It is unlikely that an application calls mm syscalls, such as
mprotect(), on already sealed mappings, so add branch prediction
hints.

Signed-off-by: Jeff Xu <jeffxu@xxxxxxxxxxxx>
Suggested-by: Pedro Falcato <pedro.falcato@xxxxxxxxx>
---
 mm/madvise.c  | 2 +-
 mm/mmap.c     | 4 ++--
 mm/mprotect.c | 2 +-
 mm/mremap.c   | 4 ++--
 mm/mseal.c    | 6 +++---
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index f7d589534e82..ea2e4f6981e2 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1442,7 +1442,7 @@ int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int beh
 	 * Check if the address range is sealed for do_madvise().
 	 * can_modify_mm_madv assumes we have acquired the lock on MM.
 	 */
-	if (!can_modify_mm_madv(mm, start, end, behavior)) {
+	if (unlikely(!can_modify_mm_madv(mm, start, end, behavior))) {
 		error = -EPERM;
 		goto out;
 	}
diff --git a/mm/mmap.c b/mm/mmap.c
index 4b80076c319e..6bc0b35ee1f9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2697,7 +2697,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 	 * Prevent unmapping a sealed VMA.
 	 * can_modify_mm assumes we have acquired the lock on MM.
 	 */
-	if (!can_modify_mm(mm, start, end))
+	if (unlikely(!can_modify_mm(mm, start, end)))
 		return -EPERM;

 	/* arch_unmap() might do unmaps itself. */
@@ -3120,7 +3120,7 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	 * Prevent unmapping a sealed VMA.
 	 * can_modify_mm assumes we have acquired the lock on MM.
 	 */
-	if (!can_modify_mm(mm, start, end))
+	if (unlikely(!can_modify_mm(mm, start, end)))
 		return -EPERM;

 	arch_unmap(mm, start, end);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index b30b2494bfcd..465c6cd9578c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -748,7 +748,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
 	 * checking if memory is sealed.
 	 * can_modify_mm assumes we have acquired the lock on MM.
 	 */
-	if (!can_modify_mm(current->mm, start, end)) {
+	if (unlikely(!can_modify_mm(current->mm, start, end))) {
 		error = -EPERM;
 		goto out;
 	}
diff --git a/mm/mremap.c b/mm/mremap.c
index d69b438dcf83..223552901f14 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -912,7 +912,7 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
 	 *
 	 * can_modify_mm assumes we have acquired the lock on MM.
 	 */
-	if (!can_modify_mm(mm, addr, addr + old_len))
+	if (unlikely(!can_modify_mm(mm, addr, addr + old_len)))
 		return -EPERM;

 	if (flags & MREMAP_FIXED) {
@@ -1087,7 +1087,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 	 * Place can_modify_mm here so we can keep the logic related to
 	 * shrink/expand together.
 	 */
-	if (!can_modify_mm(mm, addr, addr + old_len)) {
+	if (unlikely(!can_modify_mm(mm, addr, addr + old_len))) {
 		ret = -EPERM;
 		goto out;
 	}
diff --git a/mm/mseal.c b/mm/mseal.c
index daadac4b8125..bf783bba8ed0 100644
--- a/mm/mseal.c
+++ b/mm/mseal.c
@@ -32,7 +32,7 @@ static inline void set_vma_sealed(struct vm_area_struct *vma)
  */
 static bool can_modify_vma(struct vm_area_struct *vma)
 {
-	if (vma_is_sealed(vma))
+	if (unlikely(vma_is_sealed(vma)))
 		return false;

 	return true;
@@ -75,7 +75,7 @@ bool can_modify_mm(struct mm_struct *mm, unsigned long start, unsigned long end)

 	/* going through each vma to check. */
 	for_each_vma_range(vmi, vma, end) {
-		if (!can_modify_vma(vma))
+		if (unlikely(!can_modify_vma(vma)))
 			return false;
 	}

@@ -100,7 +100,7 @@ bool can_modify_mm_madv(struct mm_struct *mm, unsigned long start, unsigned long

 	/* going through each vma to check. */
 	for_each_vma_range(vmi, vma, end)
-		if (is_ro_anon(vma) && !can_modify_vma(vma))
+		if (unlikely(is_ro_anon(vma) && !can_modify_vma(vma)))
 			return false;

 	/* Allow by default. */
-- 
2.44.0.769.g3c40516874-goog
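For reference, the hint used above is the kernel's unlikely() macro, which (without branch profiling) is defined in include/linux/compiler.h as __builtin_expect(!!(x), 0), i.e. it tells the compiler to optimize code layout for the "not sealed" fast path. Below is a minimal, self-contained userspace sketch of the same pattern; the can_modify() helper and its sealed argument are purely illustrative, not the kernel functions touched by this patch.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's hints (cf. include/linux/compiler.h). */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Illustrative only: sealing is the rare case, so hint the compiler to
 * keep the not-sealed path as the straight-line, predicted branch. */
static bool can_modify(bool sealed)
{
	if (unlikely(sealed))
		return false;

	return true;
}

int main(void)
{
	printf("%d\n", can_modify(false));
	return 0;
}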