If vma->vm_mm != t->mm, there is no need to call page_mapped_in_vma(),
since add_to_kill() won't be called in that case anyway. Move the mm
check up to avoid a possibly unneeded call to page_mapped_in_vma().

Signed-off-by: Miaohe Lin <linmiaohe@xxxxxxxxxx>
---
 mm/memory-failure.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 69c4d1b48ad6..904c2b6284a4 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -521,11 +521,11 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
 		anon_vma_interval_tree_foreach(vmac, &av->rb_root,
 					       pgoff, pgoff) {
 			vma = vmac->vma;
+			if (vma->vm_mm != t->mm)
+				continue;
 			if (!page_mapped_in_vma(page, vma))
 				continue;
-			if (vma->vm_mm == t->mm)
-				add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma,
-					    to_kill);
+			add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, to_kill);
 		}
 	}
 	read_unlock(&tasklist_lock);
--
2.23.0
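
As a reading aid only, the inner loop of collect_procs_anon() with the
patch applied reads roughly as below. This is an excerpt assembled from
the hunk above, not a standalone compilable unit, and the comments are
editorial, not part of the patch:

	anon_vma_interval_tree_foreach(vmac, &av->rb_root, pgoff, pgoff) {
		vma = vmac->vma;
		/*
		 * Cheap pointer comparison first: skip VMAs that do not
		 * belong to this task's mm, since add_to_kill() would not
		 * be called for them anyway.
		 */
		if (vma->vm_mm != t->mm)
			continue;
		/*
		 * Only then do the more expensive check of whether the
		 * poisoned page is actually mapped in this VMA.
		 */
		if (!page_mapped_in_vma(page, vma))
			continue;
		add_to_kill(t, page, FSDAX_INVALID_PGOFF, vma, to_kill);
	}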