The hugetlb walker lock makes sure the pte_t* won't go away from under us.

One thing to mention is that there are two hugetlb_entry() users that can
yield the thread within hugetlb_entry(); those need an unlock/lock pair
added around the yield. Meanwhile, document hugetlb_entry() to explain
the lock requirements for sleepable hugetlb_entry() implementations.

Signed-off-by: Peter Xu <peterx@xxxxxxxxxx>
---
 arch/s390/mm/gmap.c      | 2 ++
 fs/proc/task_mmu.c       | 2 ++
 include/linux/pagewalk.h | 9 ++++++++-
 mm/pagewalk.c            | 2 ++
 4 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 02d15c8dc92e..fb2938e8d1c7 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2644,7 +2644,9 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
 	end = start + HPAGE_SIZE - 1;
 	__storage_key_init_range(start, end);
 	set_bit(PG_arch_1, &page->flags);
+	hugetlb_walker_unlock();
 	cond_resched();
+	hugetlb_walker_lock();
 	return 0;
 }
 
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 89338950afd3..ed750a52e60b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1612,7 +1612,9 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
 			frame++;
 	}
 
+	hugetlb_walker_unlock();
 	cond_resched();
+	hugetlb_walker_lock();
 
 	return err;
 }
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 959f52e5867d..7fa3724c6eb5 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -21,7 +21,14 @@ struct mm_walk;
  *			depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
  *			Any folded depths (where PTRS_PER_P?D is equal to 1)
  *			are skipped.
- * @hugetlb_entry:	if set, called for each hugetlb entry
+ * @hugetlb_entry:	if set, called for each hugetlb entry. Note that
+ *			each pte_t* is protected by hugetlb_walker_lock(),
+ *			and the lock does not allow sleeping. If an explicit
+ *			sleep is needed inside the entry fn, the caller must
+ *			release the lock (hugetlb_walker_unlock()), then
+ *			relock it (hugetlb_walker_lock()) before returning.
+ *			After the unlock, the pte_t* may become invalid at
+ *			any time, so it cannot be accessed anymore.
  * @test_walk:		caller specific callback function to determine whether
  *			we walk over the current vma or not. Returning 0 means
  *			"do page table walk over the current vma", returning
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 7f1c9b274906..abf310011ab1 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -302,6 +302,7 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
 	const struct mm_walk_ops *ops = walk->ops;
 	int err = 0;
 
+	hugetlb_walker_lock();
 	do {
 		next = hugetlb_entry_end(h, addr, end);
 		pte = huge_pte_offset(walk->mm, addr & hmask, sz);
@@ -314,6 +315,7 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
 		if (err)
 			break;
 	} while (addr = next, addr != end);
+	hugetlb_walker_unlock();
 
 	return err;
 }
-- 
2.37.3
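
For illustration, a minimal sketch of what a sleepable hugetlb_entry()
callback looks like under this scheme. The callback name and body are
hypothetical; it only mirrors the unlock/relock pattern used at the two
call sites above, with hugetlb_walker_lock()/hugetlb_walker_unlock()
assumed as introduced earlier in this series:

static int example_hugetlb_entry(pte_t *ptep, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	/* The walker lock is held here; ptep is safe to access. */
	pte_t pte = huge_ptep_get(ptep);

	/* ... process pte ... */

	/*
	 * To sleep, drop the walker lock first and relock it before
	 * returning. After the unlock, ptep may become invalid at any
	 * time and must not be touched again.
	 */
	hugetlb_walker_unlock();
	cond_resched();
	hugetlb_walker_lock();

	return 0;
}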