On Thu, May 13, 2021 at 07:57:47AM +0000, ruansy.fnst@xxxxxxxxxxx wrote:
> > -----Original Message-----
> > From: Darrick J. Wong <djwong@xxxxxxxxxx>
> > Subject: Re: [PATCH v5 1/7] fsdax: Introduce dax_iomap_cow_copy()
> >
> > On Tue, May 11, 2021 at 11:09:27AM +0800, Shiyang Ruan wrote:
> > > In the case where the iomap is a write operation and iomap is not
> > > equal to srcmap after iomap_begin, we consider it a CoW operation.
> > >
> > > The destination extent which iomap indicates is a newly allocated
> > > extent, so the data needs to be copied from srcmap to that newly
> > > allocated extent.  In theory it is better to copy only the head and
> > > tail ranges which fall outside the unaligned write range instead of
> > > copying the whole aligned range.  But in a DAX page fault the range
> > > is always aligned, so we have to copy the whole range in that case.
> > >
> > > Signed-off-by: Shiyang Ruan <ruansy.fnst@xxxxxxxxxxx>
> > > Reviewed-by: Christoph Hellwig <hch@xxxxxx>
> > > ---
> > >  fs/dax.c | 86 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
> > >  1 file changed, 81 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/fs/dax.c b/fs/dax.c
> > > index bf3fc8242e6c..f0249bb1d46a 100644
> > > --- a/fs/dax.c
> > > +++ b/fs/dax.c
> > > @@ -1038,6 +1038,61 @@ static int dax_iomap_direct_access(struct iomap *iomap, loff_t pos, size_t size,
> > >  	return rc;
> > >  }
> > >
> > > +/**
> > > + * dax_iomap_cow_copy(): Copy the data from source to destination before write.
> > > + * @pos:	address to do copy from.
> > > + * @length:	size of copy operation.
> > > + * @align_size:	aligned w.r.t align_size (either PMD_SIZE or PAGE_SIZE)
> > > + * @srcmap:	iomap srcmap
> > > + * @daddr:	destination address to copy to.
> > > + *
> > > + * This can be called from two places. Either during DAX write fault, to copy
> > > + * the length size data to daddr. Or, while doing normal DAX write operation,
> > > + * dax_iomap_actor() might call this to do the copy of either start or end
> > > + * unaligned address. In this case the rest of the copy of aligned ranges is
> > > + * taken care by dax_iomap_actor() itself.
> > > + * Also, note DAX fault will always result in aligned pos and pos + length.
> > > + */
> > > +static int dax_iomap_cow_copy(loff_t pos, loff_t length, size_t align_size,
> >
> > Nit: Linus has asked us not to continue the use of loff_t for file io
> > length.  Could you change this to 'uint64_t length', please?
> > (Assuming we even need the extra length bits?)
> >
> > With that fixed up...
> > Reviewed-by: Darrick J. Wong <djwong@xxxxxxxxxx>
> >
> > --D
> >
> > > +		struct iomap *srcmap, void *daddr)
> > > +{
> > > +	loff_t head_off = pos & (align_size - 1);
> >
> > Other nit: head_off = round_down(pos, align_size); ?
>
> We need the offset within a page here, either PTE or PMD.  So I think
> round_down() is not suitable here.

Oops, yeah.

/me wonders if any of Matthew's folio cleanups will reduce the amount of
opencoding around this...

--D

>
> --
> Thanks,
> Ruan Shiyang.
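For reference, here is a minimal userspace sketch of the offset arithmetic
discussed above; it is not part of the patch, and the pos value is an
arbitrary example.  It shows why the mask yields the byte offset within a
PTE- or PMD-sized page, while a round_down()-style computation would yield
the aligned start of that page instead.

/*
 * Standalone sketch (not from the patch): mask vs. round_down for an
 * example file offset.  align_size plays the role of PAGE_SIZE here.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long align_size = 4096;			/* PAGE_SIZE */
	unsigned long long pos = 0x1234;			/* arbitrary example offset */

	unsigned long long head_off = pos & (align_size - 1);	/* 0x234: offset within page */
	unsigned long long aligned  = pos & ~(align_size - 1);	/* 0x1000: like round_down() */

	printf("head_off = %#llx, round_down = %#llx\n", head_off, aligned);
	return 0;
}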
> > > +	size_t size = ALIGN(head_off + length, align_size);
> > > +	loff_t end = pos + length;
> > > +	loff_t pg_end = round_up(end, align_size);
> > > +	bool copy_all = head_off == 0 && end == pg_end;
> > > +	void *saddr = 0;
> > > +	int ret = 0;
> > > +
> > > +	ret = dax_iomap_direct_access(srcmap, pos, size, &saddr, NULL);
> > > +	if (ret)
> > > +		return ret;
> > > +
> > > +	if (copy_all) {
> > > +		ret = copy_mc_to_kernel(daddr, saddr, length);
> > > +		return ret ? -EIO : 0;
> > > +	}
> > > +
> > > +	/* Copy the head part of the range.  Note: we pass offset as length. */
> > > +	if (head_off) {
> > > +		ret = copy_mc_to_kernel(daddr, saddr, head_off);
> > > +		if (ret)
> > > +			return -EIO;
> > > +	}
> > > +
> > > +	/* Copy the tail part of the range */
> > > +	if (end < pg_end) {
> > > +		loff_t tail_off = head_off + length;
> > > +		loff_t tail_len = pg_end - end;
> > > +
> > > +		ret = copy_mc_to_kernel(daddr + tail_off, saddr + tail_off,
> > > +				tail_len);
> > > +		if (ret)
> > > +			return -EIO;
> > > +	}
> > > +	return 0;
> > > +}
> > > +
> > >  /*
> > >   * The user has performed a load from a hole in the file.  Allocating a new
> > >   * page in the file would cause excessive storage usage for workloads with
> > > @@ -1167,11 +1222,12 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
> > >  	struct dax_device *dax_dev = iomap->dax_dev;
> > >  	struct iov_iter *iter = data;
> > >  	loff_t end = pos + length, done = 0;
> > > +	bool write = iov_iter_rw(iter) == WRITE;
> > >  	ssize_t ret = 0;
> > >  	size_t xfer;
> > >  	int id;
> > >
> > > -	if (iov_iter_rw(iter) == READ) {
> > > +	if (!write) {
> > >  		end = min(end, i_size_read(inode));
> > >  		if (pos >= end)
> > >  			return 0;
> > > @@ -1180,7 +1236,12 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
> > >  			return iov_iter_zero(min(length, end - pos), iter);
> > >  	}
> > >
> > > -	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
> > > +	/*
> > > +	 * In DAX mode, we allow either pure overwrites of written extents, or
> > > +	 * writes to unwritten extents as part of a copy-on-write operation.
> > > +	 */
> > > +	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED &&
> > > +			!(iomap->flags & IOMAP_F_SHARED)))
> > >  		return -EIO;
> > >
> > >  	/*
> > > @@ -1219,6 +1280,13 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
> > >  			break;
> > >  		}
> > >
> > > +		if (write && srcmap->addr != iomap->addr) {
> > > +			ret = dax_iomap_cow_copy(pos, length, PAGE_SIZE, srcmap,
> > > +					kaddr);
> > > +			if (ret)
> > > +				break;
> > > +		}
> > > +
> > >  		map_len = PFN_PHYS(map_len);
> > >  		kaddr += offset;
> > >  		map_len -= offset;
> > > @@ -1230,7 +1298,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
> > >  		 * validated via access_ok() in either vfs_read() or
> > >  		 * vfs_write(), depending on which operation we are doing.
> > >  		 */
> > > -		if (iov_iter_rw(iter) == WRITE)
> > > +		if (write)
> > >  			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
> > >  					map_len, iter);
> > >  		else
> > >
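For reference, a standalone sketch (not part of the patch) that works
through the head/tail range arithmetic of dax_iomap_cow_copy() for the kind
of unaligned write dax_iomap_actor() can issue.  The pos and length values
are arbitrary examples; the head copy transfers head_off bytes and the tail
copy transfers pg_end - end bytes, mirroring the quoted code above.

/*
 * Standalone sketch (not from the patch): range arithmetic for an
 * unaligned write, with align_size playing the role of PAGE_SIZE.
 */
#include <stdio.h>

static unsigned long long align_up(unsigned long long x, unsigned long long a)
{
	return (x + a - 1) & ~(a - 1);		/* like round_up()/ALIGN() */
}

int main(void)
{
	unsigned long long align_size = 4096;			/* PAGE_SIZE */
	unsigned long long pos = 0x1100;			/* unaligned write start */
	unsigned long long length = 0x200;			/* write length */

	unsigned long long head_off = pos & (align_size - 1);	/* 0x100 */
	unsigned long long size = align_up(head_off + length, align_size); /* 0x1000 */
	unsigned long long end = pos + length;			/* 0x1300 */
	unsigned long long pg_end = align_up(end, align_size);	/* 0x2000 */
	int copy_all = head_off == 0 && end == pg_end;		/* 0: partial copy */

	printf("head_off=%#llx size=%#llx end=%#llx pg_end=%#llx copy_all=%d\n",
	       head_off, size, end, pg_end, copy_all);
	printf("head bytes=%#llx tail bytes=%#llx\n", head_off, pg_end - end);
	return 0;
}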
> > > @@ -1382,6 +1450,7 @@ static vm_fault_t dax_fault_actor(struct vm_fault *vmf, pfn_t *pfnp,
> > >  	unsigned long entry_flags = pmd ? DAX_PMD : 0;
> > >  	int err = 0;
> > >  	pfn_t pfn;
> > > +	void *kaddr;
> > >
> > >  	/* if we are reading UNWRITTEN and HOLE, return a hole. */
> > >  	if (!write &&
> > > @@ -1392,18 +1461,25 @@ static vm_fault_t dax_fault_actor(struct vm_fault *vmf, pfn_t *pfnp,
> > >  		return dax_pmd_load_hole(xas, vmf, iomap, entry);
> > >  	}
> > >
> > > -	if (iomap->type != IOMAP_MAPPED) {
> > > +	if (iomap->type != IOMAP_MAPPED && !(iomap->flags & IOMAP_F_SHARED)) {
> > >  		WARN_ON_ONCE(1);
> > >  		return pmd ? VM_FAULT_FALLBACK : VM_FAULT_SIGBUS;
> > >  	}
> > >
> > > -	err = dax_iomap_direct_access(iomap, pos, size, NULL, &pfn);
> > > +	err = dax_iomap_direct_access(iomap, pos, size, &kaddr, &pfn);
> > >  	if (err)
> > >  		return pmd ? VM_FAULT_FALLBACK : dax_fault_return(err);
> > >
> > >  	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn, entry_flags,
> > >  				  write && !sync);
> > >
> > > +	if (write &&
> > > +	    srcmap->addr != IOMAP_HOLE && srcmap->addr != iomap->addr) {
> > > +		err = dax_iomap_cow_copy(pos, size, size, srcmap, kaddr);
> > > +		if (err)
> > > +			return dax_fault_return(err);
> > > +	}
> > > +
> > >  	if (sync)
> > >  		return dax_fault_synchronous_pfnp(pfnp, pfn);
> > >
> > > --
> > > 2.31.1
> > >
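For reference, a standalone sketch (not part of the patch) showing why the
fault path always takes the copy_all branch of dax_iomap_cow_copy(): pos is
aligned to align_size and the passed length equals align_size, as noted in
the quoted kerneldoc.  A 2 MiB PMD_SIZE is assumed for the example.

/*
 * Standalone sketch (not from the patch): fault-path alignment means the
 * whole extent is copied in one copy_mc_to_kernel() call.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long align_size = 2 * 1024 * 1024;	/* PMD_SIZE (assumed 2 MiB) */
	unsigned long long pos = 4 * align_size;		/* fault-aligned position */
	unsigned long long length = align_size;			/* whole PMD-sized entry */

	unsigned long long head_off = pos & (align_size - 1);	/* 0 */
	unsigned long long end = pos + length;
	unsigned long long pg_end = (end + align_size - 1) & ~(align_size - 1);
	int copy_all = head_off == 0 && end == pg_end;		/* 1 */

	printf("copy_all = %d\n", copy_all);			/* whole extent copied at once */
	return 0;
}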