Base page PMD faulting is meant to batch handle NUMA hinting faults from
PTEs. However, even if no PTE faults would ever be handled within a
range, the kernel still traps PMD hinting faults. This patch avoids the
overhead.

Signed-off-by: Mel Gorman <mgorman@xxxxxxx>
---
 mm/mprotect.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/mm/mprotect.c b/mm/mprotect.c
index f0b087d..5aae390 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -146,6 +146,8 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 
 	pmd = pmd_offset(pud, addr);
 	do {
+		unsigned long this_pages;
+
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
@@ -165,8 +167,9 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		}
 		if (pmd_none_or_clear_bad(pmd))
 			continue;
-		pages += change_pte_range(vma, pmd, addr, next, newprot,
+		this_pages = change_pte_range(vma, pmd, addr, next, newprot,
 				 dirty_accountable, prot_numa, &all_same_nidpid);
+		pages += this_pages;
 
 		/*
 		 * If we are changing protections for NUMA hinting faults then
@@ -174,7 +177,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 		 * node. This allows a regular PMD to be handled as one fault
 		 * and effectively batches the taking of the PTL
 		 */
-		if (prot_numa && all_same_nidpid)
+		if (prot_numa && this_pages && all_same_nidpid)
 			change_pmd_protnuma(vma->vm_mm, addr, pmd);
 	} while (pmd++, addr = next, addr != end);
 
-- 
1.8.4
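
For readers not following the mm code closely, below is a minimal
userspace sketch (not kernel code) of the decision this patch changes.
change_pte_range_sim() and mark_pmd_protnuma_sim() are made-up stand-ins
for the real helpers; only the shape of the condition mirrors the patch.

/*
 * Standalone sketch of the batching decision: the PMD is only marked
 * for NUMA hinting when the PTE walk in the range actually updated at
 * least one PTE (this_pages != 0) and the whole range maps to the same
 * nid/pid grouping.
 */
#include <stdbool.h>
#include <stdio.h>

struct range_result {
	unsigned long pages_updated;	/* PTEs whose protection changed */
	bool all_same_nidpid;		/* range maps to one nid/pid group */
};

/* Simulated PTE walk: pretend some ranges have nothing to update. */
static struct range_result change_pte_range_sim(bool range_has_ptes,
						bool same_group)
{
	struct range_result r = {
		.pages_updated = range_has_ptes ? 512 : 0,
		.all_same_nidpid = same_group,
	};
	return r;
}

static void mark_pmd_protnuma_sim(unsigned long addr)
{
	printf("PMD at %#lx marked prot_numa (faults batched at PMD level)\n",
	       addr);
}

int main(void)
{
	/* Three example PMD-sized ranges: (has PTEs to update, same group) */
	struct { bool has_ptes, same_group; } ranges[] = {
		{ true,  true  },	/* batched: PMD gets marked */
		{ false, true  },	/* old code marked this too; new code skips */
		{ true,  false },	/* mixed nodes: never batched */
	};

	for (unsigned long i = 0; i < 3; i++) {
		unsigned long addr = 0x200000UL * (i + 1);
		struct range_result r =
			change_pte_range_sim(ranges[i].has_ptes,
					     ranges[i].same_group);

		/* The patch adds the r.pages_updated check to this condition. */
		if (r.pages_updated && r.all_same_nidpid)
			mark_pmd_protnuma_sim(addr);
		else
			printf("PMD at %#lx left alone (%lu PTEs updated)\n",
			       addr, r.pages_updated);
	}
	return 0;
}

The second range is the case the changelog describes: without the
this_pages check, a range with no PTE updates still had its PMD marked,
so the kernel trapped PMD hinting faults it could never usefully batch.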