Re: [PATCH 03/13] x86, dax, pmem: introduce 'copy_from_iter' dax operation

On Thu, Jan 19, 2017 at 07:50:29PM -0800, Dan Williams wrote:
> The direct-I/O write path for a pmem device must ensure that data is flushed
> to a power-fail safe zone when the operation is complete. However, other
> dax capable block devices, like brd, do not have this requirement.
> Introduce a 'copy_from_iter' dax operation so that pmem can inject
> cache management without imposing this overhead on other dax capable
> block_device drivers.
> 
> Cc: <x86@xxxxxxxxxx>
> Cc: Jan Kara <jack@xxxxxxx>
> Cc: Jeff Moyer <jmoyer@xxxxxxxxxx>
> Cc: Ingo Molnar <mingo@xxxxxxxxxx>
> Cc: Christoph Hellwig <hch@xxxxxx>
> Cc: "H. Peter Anvin" <hpa@xxxxxxxxx>
> Cc: Al Viro <viro@xxxxxxxxxxxxxxxxxx>
> Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> Cc: Matthew Wilcox <mawilcox@xxxxxxxxxxxxx>
> Cc: Ross Zwisler <ross.zwisler@xxxxxxxxxxxxxxx>
> Signed-off-by: Dan Williams <dan.j.williams@xxxxxxxxx>
> ---
>  arch/x86/include/asm/pmem.h |   31 -------------------------------
>  drivers/nvdimm/pmem.c       |   10 ++++++++++
>  fs/dax.c                    |   11 ++++++++++-
>  include/linux/blkdev.h      |    1 +
>  include/linux/pmem.h        |   24 ------------------------
>  5 files changed, 21 insertions(+), 56 deletions(-)
> 
> diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
> index f26ba430d853..0ca5e693f4a2 100644
> --- a/arch/x86/include/asm/pmem.h
> +++ b/arch/x86/include/asm/pmem.h
> @@ -64,37 +64,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
>  		clwb(p);
>  }
>  
> -/*
> - * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
> - * iterators, so for other types (bvec & kvec) we must do a cache write-back.
> - */
> -static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
> -{
> -	return iter_is_iovec(i) == false;
> -}
> -
> -/**
> - * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
> - * @addr:	PMEM destination address
> - * @bytes:	number of bytes to copy
> - * @i:		iterator with source data
> - *
> - * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
> - */
> -static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
> -		struct iov_iter *i)
> -{
> -	size_t len;
> -
> -	/* TODO: skip the write-back by always using non-temporal stores */
> -	len = copy_from_iter_nocache(addr, bytes, i);
> -
> -	if (__iter_needs_pmem_wb(i))
> -		arch_wb_cache_pmem(addr, bytes);

This write-back is no longer conditional in the pmem_copy_from_iter() version,
which means that for iovec iterators you do a non-temporal store and then
still take the time to loop through and flush the cache lines afterwards.
That seems incorrect, and I wonder if it could be the cause of the performance
regression reported by 0-day?
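Perhaps something like this (untested sketch, assuming iter_is_iovec() from
the removed x86 helper is still the right test for "copy_from_iter_nocache()
used non-temporal stores") would keep the old behavior:

static size_t pmem_copy_from_iter(void *addr, size_t bytes,
		struct iov_iter *i)
{
	size_t rc = copy_from_iter_nocache(addr, bytes, i);

	/* only bvec/kvec iterators went through the cache; flush those */
	if (!iter_is_iovec(i))
		wb_cache_pmem(addr, bytes);
	return rc;
}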

> -
> -	return len;
> -}
> -
>  /**
>   * arch_clear_pmem - zero a PMEM memory range
>   * @addr:	virtual start address
> diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
> index 6e5442174245..71e5e365d3fc 100644
> --- a/drivers/nvdimm/pmem.c
> +++ b/drivers/nvdimm/pmem.c
> @@ -217,8 +217,18 @@ __weak long pmem_direct_access(struct block_device *bdev, sector_t sector,
>  	return pmem->size - pmem->pfn_pad - offset;
>  }
>  
> +static size_t pmem_copy_from_iter(void *addr, size_t bytes,
> +		struct iov_iter *i)
> +{
> +	size_t rc = copy_from_iter_nocache(addr, bytes, i);
> +
> +	wb_cache_pmem(addr, bytes);
> +	return rc;
> +}
> +
>  static const struct dax_operations pmem_dax_ops = {
>  	.direct_access = pmem_direct_access,
> +	.copy_from_iter = pmem_copy_from_iter,
>  };
>  
>  static const struct block_device_operations pmem_fops = {
> diff --git a/fs/dax.c b/fs/dax.c
> index 81a77c070344..22cd57424a55 100644
> --- a/fs/dax.c
> +++ b/fs/dax.c
> @@ -1006,6 +1006,10 @@ static loff_t
>  dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
>  		struct iomap *iomap)
>  {
> +	struct block_device *bdev = iomap->bdev;
> +	size_t (*dax_copy_from_iter)(void *, size_t, struct iov_iter *);
> +	const struct block_device_operations *ops = bdev->bd_disk->fops;
> +	const struct dax_operations *dax_ops = ops->dax_ops;
>  	struct iov_iter *iter = data;
>  	loff_t end = pos + length, done = 0;
>  	ssize_t ret = 0;
> @@ -1033,6 +1037,11 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
>  					      (end - 1) >> PAGE_SHIFT);
>  	}
>  
> +	if (dax_ops->copy_from_iter)
> +		dax_copy_from_iter = dax_ops->copy_from_iter;
> +	else
> +		dax_copy_from_iter = copy_from_iter_nocache;
> +
>  	while (pos < end) {
>  		unsigned offset = pos & (PAGE_SIZE - 1);
>  		struct blk_dax_ctl dax = { 0 };
> @@ -1052,7 +1061,7 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
>  			map_len = end - pos;
>  
>  		if (iov_iter_rw(iter) == WRITE)
> -			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
> +			map_len = dax_copy_from_iter(dax.addr, map_len, iter);
>  		else
>  			map_len = copy_to_iter(dax.addr, map_len, iter);
>  		dax_unmap_atomic(iomap->bdev, &dax);
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index 8afce34823f5..7ca559d124a3 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -1866,6 +1866,7 @@ struct blk_dax_ctl {
>  struct dax_operations {
>  	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
>  			long);
> +	size_t (*copy_from_iter)(void *, size_t, struct iov_iter *);
>  };
>  
>  struct block_device_operations {
> diff --git a/include/linux/pmem.h b/include/linux/pmem.h
> index 71ecf3d46aac..9d542a5600e4 100644
> --- a/include/linux/pmem.h
> +++ b/include/linux/pmem.h
> @@ -31,13 +31,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
>  	BUG();
>  }
>  
> -static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
> -		struct iov_iter *i)
> -{
> -	BUG();
> -	return 0;
> -}
> -
>  static inline void arch_clear_pmem(void *addr, size_t size)
>  {
>  	BUG();
> @@ -80,23 +73,6 @@ static inline void memcpy_to_pmem(void *dst, const void *src, size_t n)
>  }
>  
>  /**
> - * copy_from_iter_pmem - copy data from an iterator to PMEM
> - * @addr:	PMEM destination address
> - * @bytes:	number of bytes to copy
> - * @i:		iterator with source data
> - *
> - * Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
> - * See blkdev_issue_flush() note for memcpy_to_pmem().
> - */
> -static inline size_t copy_from_iter_pmem(void *addr, size_t bytes,
> -		struct iov_iter *i)
> -{
> -	if (arch_has_pmem_api())
> -		return arch_copy_from_iter_pmem(addr, bytes, i);
> -	return copy_from_iter_nocache(addr, bytes, i);
> -}
> -
> -/**
>   * clear_pmem - zero a PMEM memory range
>   * @addr:	virtual start address
>   * @size:	number of bytes to zero
> 