In preparation for teaching dax_insert_entry() to take live @pgmap
references, enable it to return errors. Given the observation that all
callers overwrite the passed in entry with the return value, just update
@entry in place and convert the return code to a vm_fault_t status.

Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Jan Kara <jack@xxxxxxx>
Cc: "Darrick J. Wong" <djwong@xxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Signed-off-by: Dan Williams <dan.j.williams@xxxxxxxxx>
---
 fs/dax.c |   27 +++++++++++++++++++--------
 1 file changed, 19 insertions(+), 8 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index aceb587bc27e..d2fb58a7449b 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -853,14 +853,15 @@ static bool dax_fault_is_cow(const struct iomap_iter *iter)
  * already in the tree, we will skip the insertion and just dirty the PMD as
  * appropriate.
  */
-static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
-		const struct iomap_iter *iter, void *entry, pfn_t pfn,
-		unsigned long flags)
+static vm_fault_t dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
+		const struct iomap_iter *iter, void **pentry,
+		pfn_t pfn, unsigned long flags)
 {
 	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
 	void *new_entry = dax_make_entry(pfn, flags);
 	bool dirty = !dax_fault_is_synchronous(iter, vmf->vma);
 	bool cow = dax_fault_is_cow(iter);
+	void *entry = *pentry;
 
 	if (dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -906,7 +907,8 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 		xas_set_mark(xas, PAGECACHE_TAG_TOWRITE);
 
 	xas_unlock_irq(xas);
-	return entry;
+	*pentry = entry;
+	return 0;
 }
 
 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
@@ -1154,9 +1156,12 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
 	vm_fault_t ret;
 
-	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
+	ret = dax_insert_entry(xas, vmf, iter, entry, pfn, DAX_ZERO_PAGE);
+	if (ret)
+		goto out;
 
 	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
+out:
 	trace_dax_load_hole(inode, vmf, ret);
 	return ret;
 }
@@ -1173,6 +1178,7 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 	struct page *zero_page;
 	spinlock_t *ptl;
 	pmd_t pmd_entry;
+	vm_fault_t ret;
 	pfn_t pfn;
 
 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
@@ -1181,8 +1187,10 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 		goto fallback;
 
 	pfn = page_to_pfn_t(zero_page);
-	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn,
-				  DAX_PMD | DAX_ZERO_PAGE);
+	ret = dax_insert_entry(xas, vmf, iter, entry, pfn,
+			       DAX_PMD | DAX_ZERO_PAGE);
+	if (ret)
+		return ret;
 
 	if (arch_needs_pgtable_deposit()) {
 		pgtable = pte_alloc_one(vma->vm_mm);
@@ -1534,6 +1542,7 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
 	loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
 	bool write = iter->flags & IOMAP_WRITE;
 	unsigned long entry_flags = pmd ? DAX_PMD : 0;
+	vm_fault_t ret;
 	int err = 0;
 	pfn_t pfn;
 	void *kaddr;
@@ -1558,7 +1567,9 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
 	if (err)
 		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
 
-	*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, entry_flags);
+	ret = dax_insert_entry(xas, vmf, iter, entry, pfn, entry_flags);
+	if (ret)
+		return ret;
 
 	if (write &&
 	    srcmap->type != IOMAP_HOLE && srcmap->addr != iomap->addr) {
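
For clarity, a minimal userspace sketch of the calling-convention change
(not kernel code; toy_insert_entry() and toy_fault() are hypothetical
stand-ins for dax_insert_entry() and its callers): the entry is now
updated in place through an in/out parameter, and the return value
becomes a status the caller must check, so future failure paths (such as
a failed @pgmap reference) have a way to report errors:

#include <stdio.h>

typedef unsigned int vm_fault_t;	/* stand-in for the kernel's vm_fault_t */
#define VM_FAULT_SIGBUS	0x0002		/* one example failure status */

/*
 * New-style helper: on success the (possibly replaced) entry is written
 * back through @pentry and 0 is returned; on failure a vm_fault_t code
 * is returned and *pentry is left untouched.
 */
static vm_fault_t toy_insert_entry(void **pentry, int fail)
{
	void *entry = *pentry;

	if (fail)
		return VM_FAULT_SIGBUS;

	*pentry = entry;
	return 0;
}

/* Caller pattern: the old "entry = f(..., entry)" becomes "ret = f(..., &entry)". */
static vm_fault_t toy_fault(int fail)
{
	int storage;
	void *entry = &storage;
	vm_fault_t ret;

	ret = toy_insert_entry(&entry, fail);
	if (ret)
		return ret;

	return 0;
}

int main(void)
{
	printf("success path: %u\n", toy_fault(0));	/* prints 0 */
	printf("failure path: %u\n", toy_fault(1));	/* prints 2 */
	return 0;
}

The in/out parameter keeps the single return value free for vm_fault_t,
which matches how the surrounding fault handlers already propagate
status codes up to the fault core.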