On 11/24/21 19:10, Joao Martins wrote: > Normally, the @page mapping is set prior to inserting the page into a > page table entry. Make device-dax adhere to the same ordering, rather > than setting mapping after the PTE is inserted. > > The address_space never changes and it is always associated with the > same inode and underlying pages. So, the page mapping is set once but > cleared when the struct pages are removed/freed (i.e. after > {devm_}memunmap_pages()). > > Suggested-by: Jason Gunthorpe <jgg@xxxxxxxxxx> > Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx> > --- > drivers/dax/device.c | 8 ++++++-- > 1 file changed, 6 insertions(+), 2 deletions(-) > > diff --git a/drivers/dax/device.c b/drivers/dax/device.c > index 9c87927d4bc2..0ef9fecec005 100644 > --- a/drivers/dax/device.c > +++ b/drivers/dax/device.c > @@ -121,6 +121,8 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax, > > *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); > > + dax_set_mapping(vmf, *pfn, fault_size); > + > return vmf_insert_mixed(vmf->vma, vmf->address, *pfn); > } > > @@ -161,6 +163,8 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax, > > *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); > > + dax_set_mapping(vmf, *pfn, fault_size); > + > return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE); > } > > @@ -203,6 +207,8 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax, > > *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP); > > + dax_set_mapping(vmf, *pfn, fault_size); > + > return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE); > } > #else > @@ -245,8 +251,6 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, > rc = VM_FAULT_SIGBUS; > } > > - if (rc == VM_FAULT_NOPAGE) > - dax_set_mapping(vmf, pfn, fault_size); > dax_read_unlock(id); > > return rc; > This last chunk is going to trigger a new compiler warning, because @fault_size in dev_dax_huge_fault stops being used after this patch. 
I've added the below chunk for the next version (in addition to Christoph's comments in patch 4): @@ -217,7 +223,6 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size) { struct file *filp = vmf->vma->vm_file; - unsigned long fault_size; vm_fault_t rc = VM_FAULT_SIGBUS; int id; pfn_t pfn; @@ -230,23 +235,18 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf, id = dax_read_lock(); switch (pe_size) { case PE_SIZE_PTE: - fault_size = PAGE_SIZE; rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn); break; case PE_SIZE_PMD: - fault_size = PMD_SIZE; rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn); break; case PE_SIZE_PUD: - fault_size = PUD_SIZE; rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn); break; default: rc = VM_FAULT_SIGBUS; }