There is just a single caller using hmm_vma_walk_hole_ for the non-fault case. Use hmm_pfns_fill to fill the whole pfn array with zeroes in the only caller for the non-fault case and remove the non-fault path from hmm_vma_walk_hole_. Also rename the function to hmm_vma_fault to better describe what it does. Signed-off-by: Christoph Hellwig <hch@xxxxxx> --- mm/hmm.c | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/mm/hmm.c b/mm/hmm.c index 6d636373181a..707edba850de 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -73,45 +73,42 @@ static int hmm_pfns_fill(unsigned long addr, unsigned long end, } /* - * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s) + * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s) * @addr: range virtual start address (inclusive) * @end: range virtual end address (exclusive) * @fault: should we fault or not ? * @write_fault: write fault ? * @walk: mm_walk structure - * Return: 0 on success, -EBUSY after page fault, or page fault error + * Return: -EBUSY after page fault, or page fault error * * This function will be called whenever pmd_none() or pte_none() returns true, * or whenever there is no page directory covering the virtual address range. 
*/ -static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end, +static int hmm_vma_fault(unsigned long addr, unsigned long end, bool fault, bool write_fault, struct mm_walk *walk) { struct hmm_vma_walk *hmm_vma_walk = walk->private; struct hmm_range *range = hmm_vma_walk->range; uint64_t *pfns = range->pfns; - unsigned long i; + unsigned long i = (addr - range->start) >> PAGE_SHIFT; + WARN_ON_ONCE(!fault && !write_fault); hmm_vma_walk->last = addr; - i = (addr - range->start) >> PAGE_SHIFT; if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE)) return -EPERM; for (; addr < end; addr += PAGE_SIZE, i++) { - pfns[i] = range->values[HMM_PFN_NONE]; - if (fault || write_fault) { - int ret; + int ret; - ret = hmm_vma_do_fault(walk, addr, write_fault, - &pfns[i]); - if (ret != -EBUSY) - return ret; - } + pfns[i] = range->values[HMM_PFN_NONE]; + ret = hmm_vma_do_fault(walk, addr, write_fault, &pfns[i]); + if (ret != -EBUSY) + return ret; } - return (fault || write_fault) ? -EBUSY : 0; + return -EBUSY; } static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk, @@ -193,7 +190,10 @@ static int hmm_vma_walk_hole(unsigned long addr, unsigned long end, pfns = &range->pfns[i]; hmm_range_need_fault(hmm_vma_walk, pfns, npages, 0, &fault, &write_fault); - return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); + if (fault || write_fault) + return hmm_vma_fault(addr, end, fault, write_fault, walk); + hmm_vma_walk->last = addr; + return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE); } static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd) @@ -221,7 +221,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr, &fault, &write_fault); if (fault || write_fault) - return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); + return hmm_vma_fault(addr, end, fault, write_fault, walk); pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); for (i = 0; addr < end; addr += PAGE_SIZE, i++, 
pfn++) { @@ -352,7 +352,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, fault: pte_unmap(ptep); /* Fault any virtual address we were asked to fault */ - return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); + return hmm_vma_fault(addr, end, fault, write_fault, walk); } static int hmm_vma_walk_pmd(pmd_t *pmdp, @@ -494,7 +494,7 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end, cpu_flags, &fault, &write_fault); if (fault || write_fault) { spin_unlock(ptl); - return hmm_vma_walk_hole_(addr, end, fault, write_fault, + return hmm_vma_fault(addr, end, fault, write_fault, walk); } @@ -550,7 +550,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask, &fault, &write_fault); if (fault || write_fault) { spin_unlock(ptl); - return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk); + return hmm_vma_fault(addr, end, fault, write_fault, walk); } pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT); -- 2.24.1