On 11/25/21 11:42, Joao Martins wrote:
> On 11/24/21 19:10, Joao Martins wrote:
>> @@ -245,8 +251,6 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
>>  		rc = VM_FAULT_SIGBUS;
>>  	}
>>  
>> -	if (rc == VM_FAULT_NOPAGE)
>> -		dax_set_mapping(vmf, pfn, fault_size);
>>  	dax_read_unlock(id);
>>  
>>  	return rc;
>>
> This last chunk is going to trigger a new warning, because @fault_size in
> dev_dax_huge_fault() stops being used after this patch.
>
> I've added the chunk below for the next version (in addition to Christoph's
> comments on patch 4):
>

Re-attached as a replacement patch below the scissors line. As mentioned
earlier, I'll be respinning the v7 series with the comments I got on patch 4
and this replacement below. But given the build warning yesterday and today,
I figured I'd preemptively attach a replacement for it.

----->8-----
From 2f4cb25c0a6546a27ced4981f0963546f386caec Mon Sep 17 00:00:00 2001
From: Joao Martins <joao.m.martins@xxxxxxxxxx>
Date: Tue, 23 Nov 2021 06:00:38 -0500
Subject: [PATCH] device-dax: set mapping prior to vmf_insert_pfn{,_pmd,_pud}()

Normally, the @page mapping is set prior to inserting the page into a
page table entry. Make device-dax adhere to the same ordering, rather
than setting the mapping after the PTE is inserted.

The address_space never changes and it is always associated with the
same inode and underlying pages. So the page mapping is set once, but
cleared when the struct pages are removed/freed (i.e. after
{devm_}memunmap_pages()).

Suggested-by: Jason Gunthorpe <jgg@xxxxxxxxxx>
Signed-off-by: Joao Martins <joao.m.martins@xxxxxxxxxx>
---
 drivers/dax/device.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 9c87927d4bc2..19a6b86486ce 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -121,6 +121,8 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
 
 	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 
+	dax_set_mapping(vmf, *pfn, fault_size);
+
 	return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
 }
 
@@ -161,6 +163,8 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
 
 	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 
+	dax_set_mapping(vmf, *pfn, fault_size);
+
 	return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 
@@ -203,6 +207,8 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
 
 	*pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
 
+	dax_set_mapping(vmf, *pfn, fault_size);
+
 	return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
 }
 #else
@@ -217,7 +223,6 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
 	struct file *filp = vmf->vma->vm_file;
-	unsigned long fault_size;
 	vm_fault_t rc = VM_FAULT_SIGBUS;
 	int id;
 	pfn_t pfn;
@@ -230,23 +235,18 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
 	id = dax_read_lock();
 	switch (pe_size) {
 	case PE_SIZE_PTE:
-		fault_size = PAGE_SIZE;
 		rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
 		break;
 	case PE_SIZE_PMD:
-		fault_size = PMD_SIZE;
 		rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
 		break;
 	case PE_SIZE_PUD:
-		fault_size = PUD_SIZE;
 		rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
 		break;
 	default:
 		rc = VM_FAULT_SIGBUS;
 	}
 
-	if (rc == VM_FAULT_NOPAGE)
-		dax_set_mapping(vmf, pfn, fault_size);
 	dax_read_unlock(id);
 
 	return rc;
-- 
2.17.2
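
For context on what the moved call does: dax_set_mapping() is the helper
factored out earlier in this series (patch 4) from the old page-mapping
loop in dev_dax_huge_fault(). A minimal sketch of its shape, reconstructed
from that pre-series loop rather than taken verbatim from the series
(see patch 4 for the authoritative version):

	/* Sketch: associate each struct page backing the fault with the
	 * file's address_space, before the page table entry is installed.
	 * Reconstructed for illustration from the old loop in
	 * dev_dax_huge_fault(); not copied from the series itself.
	 */
	static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
				    unsigned long fault_size)
	{
		unsigned long i, nr_pages = fault_size / PAGE_SIZE;
		struct file *filp = vmf->vma->vm_file;
		pgoff_t pgoff;

		/* page offset of the first page covered by the fault */
		pgoff = linear_page_index(vmf->vma,
				ALIGN(vmf->address, fault_size));

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);

			/* the mapping never changes once set (see above) */
			if (page->mapping)
				continue;

			page->mapping = filp->f_mapping;
			page->index = pgoff + i;
		}
	}

With this patch, each __dev_dax_{pte,pmd,pud}_fault() calls the helper with
its own @fault_size (PAGE_SIZE, PMD_SIZE or PUD_SIZE) just before the
vmf_insert_*() call, which is why the switch in dev_dax_huge_fault() no
longer needs to track the fault size itself.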