Instead of updating the mm counter and rmap one page at a time, batch the
updates per contiguous mapped range, which brings some performance gain.

Signed-off-by: Yin Fengwei <fengwei.yin@xxxxxxxxx>
---
 mm/filemap.c | 32 ++++++++++++++++++++++++++++----
 1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index fe0c226c8b1e..6d9438490025 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3360,28 +3360,52 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 	struct vm_area_struct *vma = vmf->vma;
 	struct file *file = vma->vm_file;
 	unsigned int mmap_miss = READ_ONCE(file->f_ra.mmap_miss);
-	int ref_count = 0, count = 0;
+	int ref_count = 0, count = 0, maplen = 0;
+	struct page *pg = page;
 
 	do {
-		if (PageHWPoison(page))
+		if (PageHWPoison(page)) {
+			if (maplen) {
+				page_add_file_rmap_range(folio, pg, maplen,
+						vma, false);
+				add_mm_counter(vma->vm_mm,
+						mm_counter_file(pg), maplen);
+			}
+			pg = page + 1;
+			maplen = 0;
 			continue;
+		}
 
 		if (mmap_miss > 0)
 			mmap_miss--;
 
-		if (!pte_none(*vmf->pte))
+		if (!pte_none(*vmf->pte)) {
+			if (maplen) {
+				page_add_file_rmap_range(folio, pg, maplen,
+						vma, false);
+				add_mm_counter(vma->vm_mm,
+						mm_counter_file(pg), maplen);
+			}
+			pg = page + 1;
+			maplen = 0;
 			continue;
+		}
 
 		if (vmf->address == addr)
 			ret = VM_FAULT_NOPAGE;
 
 		ref_count++;
+		maplen++;
-		do_set_pte(vmf, page, addr);
+		do_set_pte_entry(vmf, page, addr);
 		update_mmu_cache(vma, addr, vmf->pte);
 	} while (vmf->pte++, page++, addr += PAGE_SIZE, ++count < len);
 
+	if (maplen) {
+		page_add_file_rmap_range(folio, pg, maplen, vma, false);
+		add_mm_counter(vma->vm_mm, mm_counter_file(pg), maplen);
+	}
 	folio_ref_add(folio, ref_count);
 	WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss);
 
 	return ret;
 }
-- 
2.30.2
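
Note on the change above: the loop accumulates a run of consecutively mapped
pages in maplen (starting at pg) and flushes the rmap and mm-counter
accounting once per run, restarting the run whenever a page is skipped. Below
is a minimal sketch of that flush step written as a standalone helper, using
the two calls the patch itself adds; the helper and its name
filemap_flush_map_batch() are hypothetical and not part of the patch.

/*
 * Illustration only: the batched flush that the hunk above repeats in
 * three places, factored into one hypothetical helper.
 */
static void filemap_flush_map_batch(struct folio *folio, struct page *pg,
				    int maplen, struct vm_area_struct *vma)
{
	if (!maplen)
		return;

	/* One rmap update covering the whole run of maplen pages ... */
	page_add_file_rmap_range(folio, pg, maplen, vma, false);
	/* ... and one counter adjustment instead of maplen separate ones. */
	add_mm_counter(vma->vm_mm, mm_counter_file(pg), maplen);
}

With such a helper, each of the three duplicated "if (maplen) { ... }" blocks
in the hunk would reduce to a single call like
filemap_flush_map_batch(folio, pg, maplen, vma);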