From: Jason Gunthorpe <jgg@xxxxxxxxxxxx>

Many of the direct error returns skipped doing the pte_unmap(). All
non-zero exit paths must unmap the pte.

The pte_unmap() is split unnaturally like this because some of the
error exit paths trigger a sleep and must release the lock before
sleeping.

Fixes: 992de9a8b751 ("mm/hmm: allow to mirror vma of a file on a DAX backed filesystem")
Fixes: 53f5c3f489ec ("mm/hmm: factor out pte and pmd handling to simplify hmm_vma_walk_pmd()")
Signed-off-by: Jason Gunthorpe <jgg@xxxxxxxxxxxx>
---
 mm/hmm.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/mm/hmm.c b/mm/hmm.c
index 72e5a6d9a41756..35f85424176d14 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -325,6 +325,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		}
 
 		/* Report error for everything else */
+		pte_unmap(ptep);
 		*pfn = range->values[HMM_PFN_ERROR];
 		return -EFAULT;
 	} else {
@@ -339,10 +340,13 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 	if (pte_devmap(pte)) {
 		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
 					      hmm_vma_walk->pgmap);
-		if (unlikely(!hmm_vma_walk->pgmap))
+		if (unlikely(!hmm_vma_walk->pgmap)) {
+			pte_unmap(ptep);
 			return -EBUSY;
+		}
 	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
 		if (!is_zero_pfn(pte_pfn(pte))) {
+			pte_unmap(ptep);
 			*pfn = range->values[HMM_PFN_SPECIAL];
 			return -EFAULT;
 		}
@@ -437,7 +441,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 
 		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
 		if (r) {
-			/* hmm_vma_handle_pte() did unmap pte directory */
+			/* hmm_vma_handle_pte() did pte_unmap() */
 			hmm_vma_walk->last = addr;
 			return r;
 		}
-- 
2.25.1
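
The invariant being enforced: the caller, hmm_vma_walk_pmd(), maps the
pte with pte_offset_map() and only unmaps it itself when
hmm_vma_handle_pte() returns 0 and the walk loop completes; on any
non-zero return the handler owns the unmap, and paths that sleep (such
as the migration_entry_wait() path) must unmap before sleeping. Below
is a minimal sketch of that ownership pattern, not code from the patch;
entry_needs_wait(), wait_for_entry() and pte_usable() are hypothetical
stand-ins for the real checks in hmm_vma_handle_pte():

/*
 * Illustrative sketch only, not part of the patch: a helper that is
 * handed a pte mapped by its caller via pte_offset_map() must balance
 * that mapping with pte_unmap() on every non-zero return.
 * entry_needs_wait(), wait_for_entry() and pte_usable() are
 * hypothetical stand-ins for the real checks.
 */
static int handle_one_pte(pte_t *ptep, unsigned long addr)
{
	pte_t pte = *ptep;

	if (entry_needs_wait(pte)) {
		/* Sleeping path: must unmap before the sleep */
		pte_unmap(ptep);
		wait_for_entry(addr);	/* may sleep */
		return -EBUSY;
	}

	if (!pte_usable(pte)) {
		/* Direct error return: still must unmap */
		pte_unmap(ptep);
		return -EFAULT;
	}

	/* Success: leave the pte mapped; the caller's walk loop unmaps */
	return 0;
}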