RCU makes sure the pte_t* won't go away from under us.  Please refer to
the comment above huge_pte_offset() for more information.

Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
 mm/hugetlb.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5dc87e4e6780..6d336d286394 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5822,6 +5822,8 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	int need_wait_lock = 0;
 	unsigned long haddr = address & huge_page_mask(h);
 
+	/* For huge_pte_offset() */
+	rcu_read_lock();
 	ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
 	if (ptep) {
 		/*
@@ -5830,13 +5832,15 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		 * not actually modifying content here.
 		 */
 		entry = huge_ptep_get(ptep);
+		rcu_read_unlock();
 		if (unlikely(is_hugetlb_entry_migration(entry))) {
 			migration_entry_wait_huge(vma, ptep);
 			return 0;
 		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
 			return VM_FAULT_HWPOISON_LARGE |
 				VM_FAULT_SET_HINDEX(hstate_index(h));
-	}
+	} else
+		rcu_read_unlock();
 
 	/*
 	 * Serialize hugepage allocation and instantiation, so that we don't
-- 
2.37.3
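
[Editor's note: for readers who want to see the read-side discipline in
isolation, below is a minimal userspace sketch of the same pattern,
written against liburcu (the userspace RCU library) rather than the
kernel. Everything in it, struct entry, shared, reader(), writer(), is
invented for the demo; only the lock/copy/unlock shape mirrors the
patch above.]

/*
 * rcu_sketch.c: an illustrative userspace example only; not kernel code.
 * Names (struct entry, shared, reader, writer) are invented for the demo.
 *
 * Build (assuming liburcu is installed):
 *   gcc rcu_sketch.c -o rcu_sketch -lurcu -lpthread
 */
#include <pthread.h>
#include <stdlib.h>
#include <urcu.h>	/* liburcu: rcu_read_lock(), synchronize_rcu(), ... */

struct entry {
	int value;
};

static struct entry *shared;	/* RCU-protected pointer */

/*
 * Reader: pin the object with rcu_read_lock(), copy out what we need,
 * then drop the lock. This mirrors hugetlb_fault() copying the pte via
 * huge_ptep_get() before rcu_read_unlock().
 */
static void *reader(void *arg)
{
	int i, snapshot;

	rcu_register_thread();
	for (i = 0; i < 100000; i++) {
		rcu_read_lock();
		struct entry *e = rcu_dereference(shared);

		snapshot = e ? e->value : -1;	/* copy under the lock */
		rcu_read_unlock();
		/* 'e' must not be touched here; it may already be freed */
		(void)snapshot;
	}
	rcu_unregister_thread();
	return NULL;
}

/*
 * Writer: publish a replacement, wait for pre-existing readers to exit
 * their read-side critical sections, then free the old object.
 */
static void *writer(void *arg)
{
	int i;

	rcu_register_thread();
	for (i = 0; i < 1000; i++) {
		struct entry *new = malloc(sizeof(*new));
		struct entry *old = shared;

		new->value = i;
		rcu_assign_pointer(shared, new);
		synchronize_rcu();	/* all earlier readers are now done */
		free(old);
	}
	rcu_unregister_thread();
	return NULL;
}

int main(void)
{
	pthread_t r, w;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(r, NULL);
	pthread_join(w, NULL);
	free(shared);
	return 0;
}

The property being mirrored is that the reader dereferences the
protected pointer only between rcu_read_lock() and rcu_read_unlock(),
and copies out what it needs before dropping the lock, just as
hugetlb_fault() snapshots the pte with huge_ptep_get() before calling
rcu_read_unlock().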