Patch "brd: check for REQ_NOWAIT and set correct page allocation mask" has been added to the 6.1-stable tree

This is a note to let you know that I've just added the patch titled

    brd: check for REQ_NOWAIT and set correct page allocation mask

to the 6.1-stable tree which can be found at:
    http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary

The filename of the patch is:
     brd-check-for-req_nowait-and-set-correct-page-allocation-mask.patch
and it can be found in the queue-6.1 subdirectory.

If you, or anyone else, feels it should not be added to the stable tree,
please let <stable@xxxxxxxxxxxxxxx> know about it.


From 6ded703c56c21bfb259725d4f1831a5feb563e9b Mon Sep 17 00:00:00 2001
From: Jens Axboe <axboe@xxxxxxxxx>
Date: Thu, 16 Feb 2023 08:01:08 -0700
Subject: brd: check for REQ_NOWAIT and set correct page allocation mask

From: Jens Axboe <axboe@xxxxxxxxx>

commit 6ded703c56c21bfb259725d4f1831a5feb563e9b upstream.

If REQ_NOWAIT is set, then do a non-blocking allocation if the operation
is a write and we need to insert a new page. Currently REQ_NOWAIT cannot
be set as the queue isn't marked as supporting nowait; this change is in
preparation for allowing that.

radix_tree_preload() warns on attempting to call it with an allocation
mask that doesn't allow blocking. While that warning could arguably
be removed, we need to handle radix insertion failures anyway as they
are more likely if we cannot block to get memory.

Remove the legacy BUG_ON()s and turn them into proper errors instead: one
for the allocation failure and one for finding a page that doesn't
match the correct index.

Cc: stable@xxxxxxxxxxxxxxx # 5.10+
Reviewed-by: Christoph Hellwig <hch@xxxxxx>
Signed-off-by: Jens Axboe <axboe@xxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 drivers/block/brd.c |   48 ++++++++++++++++++++++++++++--------------------
 1 file changed, 28 insertions(+), 20 deletions(-)

--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -80,26 +80,21 @@ static struct page *brd_lookup_page(stru
 /*
  * Insert a new page for a given sector, if one does not already exist.
  */
-static int brd_insert_page(struct brd_device *brd, sector_t sector)
+static int brd_insert_page(struct brd_device *brd, sector_t sector, gfp_t gfp)
 {
 	pgoff_t idx;
 	struct page *page;
-	gfp_t gfp_flags;
+	int ret = 0;
 
 	page = brd_lookup_page(brd, sector);
 	if (page)
 		return 0;
 
-	/*
-	 * Must use NOIO because we don't want to recurse back into the
-	 * block or filesystem layers from page reclaim.
-	 */
-	gfp_flags = GFP_NOIO | __GFP_ZERO | __GFP_HIGHMEM;
-	page = alloc_page(gfp_flags);
+	page = alloc_page(gfp | __GFP_ZERO | __GFP_HIGHMEM);
 	if (!page)
 		return -ENOMEM;
 
-	if (radix_tree_preload(GFP_NOIO)) {
+	if (gfpflags_allow_blocking(gfp) && radix_tree_preload(gfp)) {
 		__free_page(page);
 		return -ENOMEM;
 	}
@@ -110,15 +105,17 @@ static int brd_insert_page(struct brd_de
 	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
 		__free_page(page);
 		page = radix_tree_lookup(&brd->brd_pages, idx);
-		BUG_ON(!page);
-		BUG_ON(page->index != idx);
+		if (!page)
+			ret = -ENOMEM;
+		else if (page->index != idx)
+			ret = -EIO;
 	} else {
 		brd->brd_nr_pages++;
 	}
 	spin_unlock(&brd->brd_lock);
 
 	radix_tree_preload_end();
-	return 0;
+	return ret;
 }
 
 /*
@@ -167,19 +164,20 @@ static void brd_free_pages(struct brd_de
 /*
  * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
  */
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
+static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
+			     gfp_t gfp)
 {
 	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
 	size_t copy;
 	int ret;
 
 	copy = min_t(size_t, n, PAGE_SIZE - offset);
-	ret = brd_insert_page(brd, sector);
+	ret = brd_insert_page(brd, sector, gfp);
 	if (ret)
 		return ret;
 	if (copy < n) {
 		sector += copy >> SECTOR_SHIFT;
-		ret = brd_insert_page(brd, sector);
+		ret = brd_insert_page(brd, sector, gfp);
 	}
 	return ret;
 }
@@ -254,20 +252,26 @@ static void copy_from_brd(void *dst, str
  * Process a single bvec of a bio.
  */
 static int brd_do_bvec(struct brd_device *brd, struct page *page,
-			unsigned int len, unsigned int off, enum req_op op,
+			unsigned int len, unsigned int off, blk_opf_t opf,
 			sector_t sector)
 {
 	void *mem;
 	int err = 0;
 
-	if (op_is_write(op)) {
-		err = copy_to_brd_setup(brd, sector, len);
+	if (op_is_write(opf)) {
+		/*
+		 * Must use NOIO because we don't want to recurse back into the
+		 * block or filesystem layers from page reclaim.
+		 */
+		gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
+
+		err = copy_to_brd_setup(brd, sector, len, gfp);
 		if (err)
 			goto out;
 	}
 
 	mem = kmap_atomic(page);
-	if (!op_is_write(op)) {
+	if (!op_is_write(opf)) {
 		copy_from_brd(mem + off, brd, sector, len);
 		flush_dcache_page(page);
 	} else {
@@ -296,8 +300,12 @@ static void brd_submit_bio(struct bio *b
 				(len & (SECTOR_SIZE - 1)));
 
 		err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
-				  bio_op(bio), sector);
+				  bio->bi_opf, sector);
 		if (err) {
+			if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
+				bio_wouldblock_error(bio);
+				return;
+			}
 			bio_io_error(bio);
 			return;
 		}


Patches currently in stable-queue which might be from axboe@xxxxxxxxx are

queue-6.1/sbitmap-use-single-per-bitmap-counting-to-wake-up-qu.patch
queue-6.1/io_uring-handle-tif_notify_resume-when-checking-for-task_work.patch
queue-6.1/brd-return-0-error-from-brd_insert_page.patch
queue-6.1/fs-hfsplus-fix-uaf-issue-in-hfsplus_put_super.patch
queue-6.1/block-don-t-allow-multiple-bios-for-iocb_nowait-issue.patch
queue-6.1/sbitmap-correct-wake_batch-recalculation-to-avoid-po.patch
queue-6.1/blk-mq-avoid-sleep-in-blk_mq_alloc_request_hctx.patch
queue-6.1/io_uring-add-reschedule-point-to-handle_tw_list.patch
queue-6.1/ublk_drv-remove-nr_aborted_queues-from-ublk_device.patch
queue-6.1/io_uring-remove-msg_nosignal-from-recvmsg.patch
queue-6.1/brd-check-for-req_nowait-and-set-correct-page-allocation-mask.patch
queue-6.1/io_uring-mark-task-task_running-before-handling-resume-task-work.patch
queue-6.1/blk-mq-fix-potential-io-hung-for-shared-sbitmap-per-.patch
queue-6.1/blk-mq-wait-on-correct-sbitmap_queue-in-blk_mq_mark_.patch
queue-6.1/block-clear-bio-bi_bdev-when-putting-a-bio-back-in-the-cache.patch
queue-6.1/io_uring-fix-fget-leak-when-fs-don-t-support-nowait-buffered-read.patch
queue-6.1/ublk_drv-don-t-probe-partitions-if-the-ubq-daemon-is.patch
queue-6.1/trace-blktrace-fix-memory-leak-with-using-debugfs_lo.patch
queue-6.1/io_uring-rsrc-disallow-multi-source-reg-buffers.patch
queue-6.1/x86-fpu-don-t-set-tif_need_fpu_load-for-pf_io_worker.patch
queue-6.1/io_uring-replace-0-length-array-with-flexible-array.patch
queue-6.1/blk-cgroup-dropping-parent-refcount-after-pd_free_fn.patch
queue-6.1/block-be-a-bit-more-careful-in-checking-for-null-bdev-while-polling.patch
queue-6.1/block-use-proper-return-value-from-bio_failfast.patch
queue-6.1/block-fix-io-statistics-for-cgroup-in-throttle-path.patch
queue-6.1/block-ublk-check-io-buffer-based-on-flag-need_get_da.patch
queue-6.1/io_uring-use-user-visible-tail-in-io_uring_poll.patch
queue-6.1/blk-cgroup-synchronize-pd_free_fn-from-blkg_free_wor.patch
queue-6.1/sbitmap-remove-redundant-check-in-__sbitmap_queue_ge.patch
queue-6.1/block-sync-mixed-merged-request-s-failfast-with-1st-.patch
queue-6.1/blk-mq-remove-stale-comment-for-blk_mq_sched_mark_re.patch
queue-6.1/brd-mark-as-nowait-compatible.patch
queue-6.1/blk-iocost-fix-divide-by-0-error-in-calc_lcoefs.patch
queue-6.1/s390-dasd-fix-potential-memleak-in-dasd_eckd_init.patch
queue-6.1/blk-mq-correct-stale-comment-of-.get_budget.patch
queue-6.1/io_uring-add-a-conditional-reschedule-to-the-iopoll-cancelation-loop.patch
queue-6.1/block-bio-integrity-copy-flags-when-bio_integrity_pa.patch


