On 12/14/24 04:10, Luis Chamberlain wrote:
Move the code from block_read_full_folio() which does a batch of async
reads into a helper.
No functional changes.
Signed-off-by: Luis Chamberlain <mcgrof@xxxxxxxxxx>
---
fs/buffer.c | 73 +++++++++++++++++++++++++++++++----------------------
1 file changed, 43 insertions(+), 30 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index cc8452f60251..580451337efa 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2350,6 +2350,48 @@ bool block_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
}
EXPORT_SYMBOL(block_is_partially_uptodate);
+static void bh_read_batch_async(struct folio *folio,
+ int nr, struct buffer_head *arr[],
+ bool fully_mapped, bool no_reads,
+ bool any_get_block_error)
+{
+ int i;
+ struct buffer_head *bh;
+
+ if (fully_mapped)
+ folio_set_mappedtodisk(folio);
+
+ if (no_reads) {
+ /*
+ * All buffers are uptodate or get_block() returned an
+ * error when trying to map them - we can finish the read.
+ */
+ folio_end_read(folio, !any_get_block_error);
+ return;
+ }
+
+ /* Stage one: lock the buffers */
Now you messed up the documentation: originally this was 'stage two',
so now we have two 'stage one' comments.
Please stick with the original numbering convention and add a note to
the helper that it depends on 'stage 1' having already been done in the
calling function; see the sketch below.
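Something along these lines would keep the convention intact (just a
sketch, assuming 'stage 1' here refers to the get_block() mapping loop
in the caller):

	/*
	 * Stage two: lock the buffers. Note: 'stage one' (mapping the
	 * buffers with get_block()) has already been done by the
	 * caller, block_read_full_folio().
	 */
	for (i = 0; i < nr; i++) {
		bh = arr[i];
		lock_buffer(bh);
		mark_buffer_async_read(bh);
	}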
+ for (i = 0; i < nr; i++) {
+ bh = arr[i];
+ lock_buffer(bh);
+ mark_buffer_async_read(bh);
+ }
+
+ /*
+ * Stage 2: start the IO. Check for uptodateness
+ * inside the buffer lock in case another process reading
+ * the underlying blockdev brought it uptodate (the sct fix).
+ */
Same here; should be 'stage 3' to be consistent.
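I.e. just keep the original comment here:

	/*
	 * Stage 3: start the IO. Check for uptodateness
	 * inside the buffer lock in case another process reading
	 * the underlying blockdev brought it uptodate (the sct fix).
	 */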
+ for (i = 0; i < nr; i++) {
+ bh = arr[i];
+ if (buffer_uptodate(bh))
+ end_buffer_async_read(bh, 1);
+ else
+ submit_bh(REQ_OP_READ, bh);
+ }
+}
+
/*
* Generic "read_folio" function for block devices that have the normal
* get_block functionality. This is most of the block device filesystems.
@@ -2414,37 +2456,8 @@ int block_read_full_folio(struct folio *folio, get_block_t *get_block)
arr[nr++] = bh;
} while (i++, iblock++, (bh = bh->b_this_page) != head);
- if (fully_mapped)
- folio_set_mappedtodisk(folio);
-
- if (!nr) {
- /*
- * All buffers are uptodate or get_block() returned an
- * error when trying to map them - we can finish the read.
- */
- folio_end_read(folio, !page_error);
- return 0;
- }
-
- /* Stage two: lock the buffers */
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- lock_buffer(bh);
- mark_buffer_async_read(bh);
- }
+ bh_read_batch_async(folio, nr, arr, fully_mapped, nr == 0, page_error);
- /*
- * Stage 3: start the IO. Check for uptodateness
- * inside the buffer lock in case another process reading
- * the underlying blockdev brought it uptodate (the sct fix).
- */
- for (i = 0; i < nr; i++) {
- bh = arr[i];
- if (buffer_uptodate(bh))
- end_buffer_async_read(bh, 1);
- else
- submit_bh(REQ_OP_READ, bh);
- }
return 0;
}
EXPORT_SYMBOL(block_read_full_folio);
Otherwise looks good.
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@xxxxxxx +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich