This patch replaces the mm->page_table_lock with the pud_page ptl in the THP fault path; pretty self-explanatory. Rather than digging up the pud_page ptl inside follow_trans_huge_pmd, I commented out the spinlock assertion there. Since this is just a proof of concept, keeping that assertion around did not seem important for now. --- mm/huge_memory.c | 4 ++-- mm/memory.c | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a92012a..d3b34e2f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1240,10 +1240,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned int flags) { - struct mm_struct *mm = vma->vm_mm; +// struct mm_struct *mm = vma->vm_mm; struct page *page = NULL; - assert_spin_locked(&mm->page_table_lock); +// assert_spin_locked(&mm->page_table_lock); if (flags & FOLL_WRITE && !pmd_write(*pmd)) goto out; diff --git a/mm/memory.c b/mm/memory.c index af84bc0..5b4e910 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1527,15 +1527,15 @@ struct page *follow_page_mask(struct vm_area_struct *vma, split_huge_page_pmd(vma, address, pmd); goto split_fallthrough; } - spin_lock(&mm->page_table_lock); + spin_lock(&pud_page(*pud)->ptl); if (likely(pmd_trans_huge(*pmd))) { if (unlikely(pmd_trans_splitting(*pmd))) { - spin_unlock(&mm->page_table_lock); + spin_unlock(&pud_page(*pud)->ptl); wait_split_huge_page(vma->anon_vma, pmd); } else { page = follow_trans_huge_pmd(vma, address, pmd, flags); - spin_unlock(&mm->page_table_lock); + spin_unlock(&pud_page(*pud)->ptl); *page_mask = HPAGE_PMD_NR - 1; goto out; } -- 1.7.12.4 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>