Pass @vmf to drop the separate @vma and @address arguments to
dax_associate_entry(), use the existing DAX entry flags to convey the
@cow argument, and replace the open-coded ALIGN_DOWN().

Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
Cc: Jan Kara <jack@xxxxxxx>
Cc: "Darrick J. Wong" <djwong@xxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Signed-off-by: Dan Williams <dan.j.williams@xxxxxxxxx>
---
 fs/dax.c |    9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index 8382aab0d2f7..bd5c6b6e371e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -368,7 +368,7 @@ static inline void dax_mapping_set_cow(struct page *page)
  * FS_DAX_MAPPING_COW, and use page->index as refcount.
  */
 static void dax_associate_entry(void *entry, struct address_space *mapping,
-		struct vm_area_struct *vma, unsigned long address, bool cow)
+		struct vm_fault *vmf, unsigned long flags)
 {
 	unsigned long size = dax_entry_size(entry), pfn, index;
 	int i = 0;
@@ -376,11 +376,11 @@ static void dax_associate_entry(void *entry, struct address_space *mapping,
 	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
 		return;
 
-	index = linear_page_index(vma, address & ~(size - 1));
+	index = linear_page_index(vmf->vma, ALIGN_DOWN(vmf->address, size));
 	for_each_mapped_pfn(entry, pfn) {
 		struct page *page = pfn_to_page(pfn);
 
-		if (cow) {
+		if (flags & DAX_COW) {
 			dax_mapping_set_cow(page);
 		} else {
 			WARN_ON_ONCE(page->mapping);
@@ -916,8 +916,7 @@ static vm_fault_t dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
 		void *old;
 
 		dax_disassociate_entry(entry, mapping, false);
-		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
-				cow);
+		dax_associate_entry(new_entry, mapping, vmf, flags);
 		/*
 		 * Only swap our new entry into the page cache if the current
 		 * entry is a zero page or an empty entry. If a normal PTE or
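
For reference, the conversion depends on rounding the faulting address
down to the entry size, not up. Below is a minimal userspace sketch of
that identity, reusing the kernel's ALIGN()/ALIGN_DOWN() definitions
from include/linux/align.h; the address and size values are
illustrative only (the entry size is always a power of two, PAGE_SIZE
or PMD_SIZE):

/*
 * Sketch: the open-coded mask in dax_associate_entry() is
 * ALIGN_DOWN(), not ALIGN(). Compile with gcc (uses typeof()).
 */
#include <assert.h>
#include <stdio.h>

#define __ALIGN_KERNEL_MASK(x, mask)	(((x) + (mask)) & ~(mask))
#define __ALIGN_KERNEL(x, a)	__ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
#define ALIGN(x, a)		__ALIGN_KERNEL((x), (a))
#define ALIGN_DOWN(x, a)	__ALIGN_KERNEL((x) - ((a) - 1), (a))

int main(void)
{
	unsigned long size = 1UL << 21;		/* PMD_SIZE on x86-64 */
	unsigned long address = 0x7f0000201000;	/* page-, not PMD-, aligned */

	/* The open-coded mask rounds down to the entry boundary... */
	assert((address & ~(size - 1)) == ALIGN_DOWN(address, size));
	/* ...whereas ALIGN() rounds up to the next boundary. */
	assert(ALIGN(address, size) == ALIGN_DOWN(address, size) + size);

	printf("down=%#lx up=%#lx\n", ALIGN_DOWN(address, size),
	       ALIGN(address, size));
	return 0;
}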
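
The rounding matters because, for a PMD entry, linear_page_index() must
yield the index of the entry's first subpage so that the
page->index = index + i++ loop numbers all of them correctly. A sketch
of that index math follows, paraphrasing linear_page_index() from
include/linux/pagemap.h with the hugetlb special case omitted and
struct vm_area_struct reduced to the two fields used:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)

struct vm_area_struct {
	unsigned long vm_start;	/* first virtual address of the mapping */
	unsigned long vm_pgoff;	/* file offset of vm_start, in pages */
};

/* Simplified linear_page_index(): file page index for @address. */
static unsigned long linear_page_index(struct vm_area_struct *vma,
				       unsigned long address)
{
	return ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
}

int main(void)
{
	struct vm_area_struct vma = {
		.vm_start = 0x7f0000000000,
		.vm_pgoff = 0,
	};
	unsigned long fault = 0x7f0000201000;	/* inside the 2nd PMD */

	/* Rounded down: index of the PMD entry's first subpage (0x200). */
	printf("%#lx\n", linear_page_index(&vma, fault & ~(PMD_SIZE - 1)));
	/* Unrounded: would point mid-entry (0x201). */
	printf("%#lx\n", linear_page_index(&vma, fault));
	return 0;
}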