[PATCH v1 11/15] block: make sure bio_add_page*() knows page that are coming from GUP

From: Jérôme Glisse <jglisse@xxxxxxxxxx>

When we get a page reference through get_user_page*(), we want to keep
track of that fact, so pass that information down to bio_add_page*()
through the new is_gup parameter.
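
For illustration, here is a minimal sketch (not part of this patch) of a
caller that pins a user page with get_user_pages() and flags it as a GUP
page when adding it to a bio. The is_gup parameter of bio_add_page() and
the put_user_page() helper are the ones this series builds on; the
function name and error codes below are made up for the example, and it
assumes the bio still has a free bvec slot:

	static int add_pinned_user_page(struct bio *bio, unsigned long uaddr,
					unsigned int len, unsigned int offset)
	{
		struct page *page;
		long ret;

		/* Take a GUP reference on the user page backing @uaddr. */
		ret = get_user_pages(uaddr, 1, FOLL_WRITE, &page, NULL);
		if (ret != 1)
			return ret < 0 ? ret : -EFAULT;

		/* Tell the block layer this page reference came from GUP. */
		if (bio_add_page(bio, page, len, offset, true) != len) {
			/* GUP references must be dropped with put_user_page(). */
			put_user_page(page);
			return -ENOMEM;
		}
		return 0;
	}

Tracking this per bvec is what later allows the completion path to
release GUP pages with put_user_page() instead of put_page().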

Signed-off-by: Jérôme Glisse <jglisse@xxxxxxxxxx>
Cc: linux-fsdevel@xxxxxxxxxxxxxxx
Cc: linux-block@xxxxxxxxxxxxxxx
Cc: linux-mm@xxxxxxxxx
Cc: John Hubbard <jhubbard@xxxxxxxxxx>
Cc: Jan Kara <jack@xxxxxxx>
Cc: Dan Williams <dan.j.williams@xxxxxxxxx>
Cc: Alexander Viro <viro@xxxxxxxxxxxxxxxxxx>
Cc: Johannes Thumshirn <jthumshirn@xxxxxxx>
Cc: Christoph Hellwig <hch@xxxxxx>
Cc: Jens Axboe <axboe@xxxxxxxxx>
Cc: Ming Lei <ming.lei@xxxxxxxxxx>
Cc: Dave Chinner <david@xxxxxxxxxxxxx>
Cc: Jason Gunthorpe <jgg@xxxxxxxx>
Cc: Matthew Wilcox <willy@xxxxxxxxxxxxx>
---
 block/bio.c | 34 +++++++++++++++++++++++++++-------
 1 file changed, 27 insertions(+), 7 deletions(-)

diff --git a/block/bio.c b/block/bio.c
index 73227ede9a0a..197b70426aa6 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -708,7 +708,10 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
 	 * cannot add the page
 	 */
 	bvec = &bio->bi_io_vec[bio->bi_vcnt];
-	bvec_set_page(bvec, page);
+	if (is_gup)
+		bvec_set_gup_page(bvec, page);
+	else
+		bvec_set_page(bvec, page);
 	bvec->bv_len = len;
 	bvec->bv_offset = offset;
 	bio->bi_vcnt++;
@@ -793,6 +796,7 @@ EXPORT_SYMBOL_GPL(__bio_try_merge_page);
  * @page: page to add
  * @len: length of the data to add
  * @off: offset of the data in @page
+ * @is_gup: was the page referenced through GUP (get_user_page*)
  *
  * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
  * that @bio has space for another bvec.
@@ -805,7 +809,10 @@ void __bio_add_page(struct bio *bio, struct page *page,
 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
 	WARN_ON_ONCE(bio_full(bio));
 
-	bvec_set_page(bv, page);
+	if (is_gup)
+		bvec_set_gup_page(bv, page);
+	else
+		bvec_set_page(bv, page);
 	bv->bv_offset = off;
 	bv->bv_len = len;
 
@@ -820,6 +827,7 @@ EXPORT_SYMBOL_GPL(__bio_add_page);
  *	@page: page to add
  *	@len: vec entry length
  *	@offset: vec entry offset
+ *	@is_gup: was the page referenced through GUP (get_user_page*)
  *
  *	Attempt to add a page to the bio_vec maplist. This will only fail
  *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
@@ -830,7 +838,7 @@ int bio_add_page(struct bio *bio, struct page *page,
 	if (!__bio_try_merge_page(bio, page, len, offset, false)) {
 		if (bio_full(bio))
 			return 0;
-		__bio_add_page(bio, page, len, offset, false);
+		__bio_add_page(bio, page, len, offset, is_gup);
 	}
 	return len;
 }
@@ -885,6 +893,7 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	ssize_t size, left;
 	unsigned len, i;
 	size_t offset;
+	bool gup;
 
 	/*
 	 * Move page array up in the allocated memory for the bio vecs as far as
@@ -894,6 +903,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
 
+	/* Is iov_iter_get_pages() using GUP ? */
+	gup = iov_iter_get_pages_use_gup(iter);
 	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
 	if (unlikely(size <= 0))
 		return size ? size : -EFAULT;
@@ -902,7 +913,8 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 		struct page *page = pages[i];
 
 		len = min_t(size_t, PAGE_SIZE - offset, left);
-		if (WARN_ON_ONCE(bio_add_page(bio, page, len, offset, false) != len))
+		if (WARN_ON_ONCE(bio_add_page(bio, page, len,
+					      offset, gup) != len))
 			return -EINVAL;
 		offset = 0;
 	}
@@ -1372,6 +1384,10 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		ssize_t bytes;
 		size_t offs, added = 0;
 		int npages;
+		bool gup;
+
+		/* Is iov_iter_get_pages() using GUP ? */
+		gup = iov_iter_get_pages_use_gup(iter);
 
 		bytes = iov_iter_get_pages_alloc(iter, &pages, LONG_MAX, &offs);
 		if (unlikely(bytes <= 0)) {
@@ -1393,7 +1409,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 				if (n > bytes)
 					n = bytes;
 
-				if (!bio_add_pc_page(q, bio, page, n, offs, false))
+				if (!bio_add_pc_page(q, bio, page, n, offs, gup))
 					break;
 
 				/*
@@ -1412,8 +1428,12 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 		/*
 		 * release the pages we didn't map into the bio, if any
 		 */
-		while (j < npages)
-			put_page(pages[j++]);
+		while (j < npages) {
+			if (gup)
+				put_user_page(pages[j++]);
+			else
+				put_page(pages[j++]);
+		}
 		kvfree(pages);
 		/* couldn't stuff something into bio? */
 		if (bytes)
-- 
2.20.1



