Re: [PATCH v8 01/10] iov_iter: Define flags to qualify page extraction.

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



On 1/23/23 09:29, David Howells wrote:
Define flags to qualify page extraction to pass into iov_iter_*_pages*()
rather than passing in FOLL_* flags.

For now only a flag to allow peer-to-peer DMA is supported.

Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
cc: Al Viro <viro@xxxxxxxxxxxxxxxxxx>
cc: Christoph Hellwig <hch@xxxxxxxxxxxxx>
cc: Jens Axboe <axboe@xxxxxxxxx>
cc: Logan Gunthorpe <logang@xxxxxxxxxxxx>
cc: linux-fsdevel@xxxxxxxxxxxxxxx
cc: linux-block@xxxxxxxxxxxxxxx
---

Notes:
     ver #7)
      - Don't use FOLL_* as a parameter, but rather define constants
        specifically to use with iov_iter_*_pages*().
      - Drop the I/O direction constants for now.

  block/bio.c         |  6 +++---
  block/blk-map.c     |  8 ++++----
  include/linux/uio.h |  7 +++++--
  lib/iov_iter.c      | 14 ++++++++------
  4 files changed, 20 insertions(+), 15 deletions(-)

One possible way this could run into problems is if any callers are
violating the new requirement that flags be limited to those used to
govern iov extraction. In other words, if any callers were passing other
gup flags in (because instead of adding to them, the code is now setting
flags to zero and only looking at the new extract_flags). So I checked
for that and there aren't any.

So this looks good. I will lightly suggest renaming extract_flags to
extraction_flags, because it reads like a boolean: "to extract, or not
to extract flags, that is the question". Of course, once you look at the
code it is clear. But the extra "ion" doesn't overflow the 80 cols
anywhere and it is a nice touch.

Up to you. Either way, please feel free to add:

Reviewed-by: John Hubbard <jhubbard@xxxxxxxxxx>

thanks,
--
John Hubbard
NVIDIA


diff --git a/block/bio.c b/block/bio.c
index ab59a491a883..a289bbff036f 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1249,7 +1249,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
  	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
  	struct page **pages = (struct page **)bv;
-	unsigned int gup_flags = 0;
+	unsigned int extract_flags = 0;
  	ssize_t size, left;
  	unsigned len, i = 0;
  	size_t offset, trim;
@@ -1264,7 +1264,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
 
  	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
-		gup_flags |= FOLL_PCI_P2PDMA;
+		extract_flags |= ITER_ALLOW_P2PDMA;
/*
  	 * Each segment in the iov is required to be a block size multiple.
@@ -1275,7 +1275,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
  	 */
  	size = iov_iter_get_pages(iter, pages,
  				  UINT_MAX - bio->bi_iter.bi_size,
-				  nr_pages, &offset, gup_flags);
+				  nr_pages, &offset, extract_flags);
  	if (unlikely(size <= 0))
  		return size ? size : -EFAULT;
diff --git a/block/blk-map.c b/block/blk-map.c
index 19940c978c73..bc111261fc82 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -267,7 +267,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
  {
  	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
  	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
-	unsigned int gup_flags = 0;
+	unsigned int extract_flags = 0;
  	struct bio *bio;
  	int ret;
  	int j;
@@ -280,7 +280,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
  		return -ENOMEM;
 
  	if (blk_queue_pci_p2pdma(rq->q))
-		gup_flags |= FOLL_PCI_P2PDMA;
+		extract_flags |= ITER_ALLOW_P2PDMA;
 
  	while (iov_iter_count(iter)) {
  		struct page **pages, *stack_pages[UIO_FASTIOV];
@@ -291,10 +291,10 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
  		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
  			pages = stack_pages;
  			bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
-						   nr_vecs, &offs, gup_flags);
+						   nr_vecs, &offs, extract_flags);
  		} else {
  			bytes = iov_iter_get_pages_alloc(iter, &pages,
-						LONG_MAX, &offs, gup_flags);
+						LONG_MAX, &offs, extract_flags);
  		}
  		if (unlikely(bytes <= 0)) {
  			ret = bytes ? bytes : -EFAULT;
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 9f158238edba..46d5080314c6 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -252,12 +252,12 @@ void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *
  		     loff_t start, size_t count);
  ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages,
  		size_t maxsize, unsigned maxpages, size_t *start,
-		unsigned gup_flags);
+		unsigned extract_flags);
  ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
  			size_t maxsize, unsigned maxpages, size_t *start);
  ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
  		struct page ***pages, size_t maxsize, size_t *start,
-		unsigned gup_flags);
+		unsigned extract_flags);
  ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
  			size_t maxsize, size_t *start);
  int iov_iter_npages(const struct iov_iter *i, int maxpages);
@@ -360,4 +360,7 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
  	};
  }
+/* Flags for iov_iter_get/extract_pages*() */
+#define ITER_ALLOW_P2PDMA	0x01	/* Allow P2PDMA on the extracted pages */
+
  #endif
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index f9a3ff37ecd1..fb04abe7d746 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1432,9 +1432,9 @@ static struct page *first_bvec_segment(const struct iov_iter *i,
  static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
  		   struct page ***pages, size_t maxsize,
  		   unsigned int maxpages, size_t *start,
-		   unsigned int gup_flags)
+		   unsigned int extract_flags)
  {
-	unsigned int n;
+	unsigned int n, gup_flags = 0;
 
  	if (maxsize > i->count)
  		maxsize = i->count;
@@ -1442,6 +1442,8 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
  		return 0;
  	if (maxsize > MAX_RW_COUNT)
  		maxsize = MAX_RW_COUNT;
+	if (extract_flags & ITER_ALLOW_P2PDMA)
+		gup_flags |= FOLL_PCI_P2PDMA;
 
  	if (likely(user_backed_iter(i))) {
  		unsigned long addr;
@@ -1495,14 +1497,14 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
 
  ssize_t iov_iter_get_pages(struct iov_iter *i,
  		   struct page **pages, size_t maxsize, unsigned maxpages,
-		   size_t *start, unsigned gup_flags)
+		   size_t *start, unsigned extract_flags)
  {
  	if (!maxpages)
  		return 0;
  	BUG_ON(!pages);
 
  	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
-					  start, gup_flags);
+					  start, extract_flags);
  }
  EXPORT_SYMBOL_GPL(iov_iter_get_pages);
@@ -1515,14 +1517,14 @@ EXPORT_SYMBOL(iov_iter_get_pages2);
 
  ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
  		   struct page ***pages, size_t maxsize,
-		   size_t *start, unsigned gup_flags)
+		   size_t *start, unsigned extract_flags)
  {
  	ssize_t len;
 
  	*pages = NULL;
 
  	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
-					 gup_flags);
+					 extract_flags);
  	if (len <= 0) {
  		kvfree(*pages);
  		*pages = NULL;





[Index of Archives]     [Linux RAID]     [Linux SCSI]     [Linux ATA RAID]     [IDE]     [Linux Wireless]     [Linux Kernel]     [ATH6KL]     [Linux Bluetooth]     [Linux Netdev]     [Kernel Newbies]     [Security]     [Git]     [Netfilter]     [Bugtraq]     [Yosemite News]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Device Mapper]

  Powered by Linux