On Tue, Mar 19, 2024 at 09:45:58AM +1100, Dave Chinner wrote:
> From: Christoph Hellwig <hch@xxxxxx>
> 
> Instead of walking the folio array just walk the kernel virtual
> address in ->b_addr. This prepares for using vmalloc for buffers
> and removing the b_folio array.
> 
> [dchinner: ported to folios-based buffers.]
> 
> Signed-off-by: Christoph Hellwig <hch@xxxxxx>
> Signed-off-by: Dave Chinner <dchinner@xxxxxxxxxx>
> ---
>  fs/xfs/xfs_buf.c | 110 +++++++++++++----------------------------------
>  fs/xfs/xfs_buf.h |   2 -
>  2 files changed, 29 insertions(+), 83 deletions(-)
> 
> diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
> index a77e2d8c8107..303945554415 100644
> --- a/fs/xfs/xfs_buf.c
> +++ b/fs/xfs/xfs_buf.c
> @@ -358,7 +358,6 @@ xfs_buf_alloc_kmem(
>  	}
>  	bp->b_folios = bp->b_folio_array;
>  	bp->b_folios[0] = kmem_to_folio(bp->b_addr);
> -	bp->b_offset = offset_in_folio(bp->b_folios[0], bp->b_addr);
>  	bp->b_folio_count = 1;
>  	bp->b_flags |= _XBF_KMEM;
>  	return 0;
> @@ -1549,87 +1548,44 @@ xfs_buf_bio_end_io(
>  static void
>  xfs_buf_ioapply_map(
>  	struct xfs_buf	*bp,
> -	int		map,
> -	int		*buf_offset,
> -	int		*count,
> +	unsigned int	map,

I like making these never-negative quantities unsigned.

> +	unsigned int	*buf_offset,
>  	blk_opf_t	op)
>  {
> -	int		folio_index;
> -	unsigned int	total_nr_folios = bp->b_folio_count;
> -	int		nr_folios;
>  	struct bio	*bio;
> -	sector_t	sector = bp->b_maps[map].bm_bn;
>  	int		size;
> -	int		offset;
> 
> -	/*
> -	 * If the start offset if larger than a single page, we need to be
> -	 * careful. We might have a high order folio, in which case the indexing
> -	 * is from the start of the buffer. However, if we have more than one
> -	 * folio single page folio in the buffer, we need to skip the folios in
> -	 * the buffer before the start offset.
> -	 */
> -	folio_index = 0;
> -	offset = *buf_offset;
> -	if (bp->b_folio_count > 1) {
> -		while (offset >= PAGE_SIZE) {
> -			folio_index++;
> -			offset -= PAGE_SIZE;
> -		}
> +	/* Limit the IO size to the length of the current vector. */
> +	size = min_t(unsigned int, BBTOB(bp->b_maps[map].bm_len),
> +			BBTOB(bp->b_length) - *buf_offset);
> +
> +	if (WARN_ON_ONCE(bp->b_folio_count > BIO_MAX_VECS)) {
> +		xfs_buf_ioerror(bp, -EIO);
> +		return;
>  	}
> 
> -	/*
> -	 * Limit the IO size to the length of the current vector, and update the
> -	 * remaining IO count for the next time around.
> -	 */
> -	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
> -	*count -= size;
> -	*buf_offset += size;
> -
> -next_chunk:
>  	atomic_inc(&bp->b_io_remaining);
> -	nr_folios = bio_max_segs(total_nr_folios);
> 
> -	bio = bio_alloc(bp->b_target->bt_bdev, nr_folios, op, GFP_NOIO);
> -	bio->bi_iter.bi_sector = sector;
> +	bio = bio_alloc(bp->b_target->bt_bdev, bp->b_folio_count, op, GFP_NOIO);
> +	bio->bi_iter.bi_sector = bp->b_maps[map].bm_bn;
>  	bio->bi_end_io = xfs_buf_bio_end_io;
>  	bio->bi_private = bp;
> 
> -	for (; size && nr_folios; nr_folios--, folio_index++) {
> -		struct folio	*folio = bp->b_folios[folio_index];
> -		int		nbytes = folio_size(folio) - offset;
> -
> -		if (nbytes > size)
> -			nbytes = size;
> -
> -		if (!bio_add_folio(bio, folio, nbytes,
> -				offset_in_folio(folio, offset)))
> -			break;
> -
> -		offset = 0;
> -		sector += BTOBB(nbytes);
> -		size -= nbytes;
> -		total_nr_folios--;
> -	}
> -
> -	if (likely(bio->bi_iter.bi_size)) {
> -		if (xfs_buf_is_vmapped(bp)) {
> -			flush_kernel_vmap_range(bp->b_addr,
> -						xfs_buf_vmap_len(bp));
> -		}
> -		submit_bio(bio);
> -		if (size)
> -			goto next_chunk;
> -	} else {
> -		/*
> -		 * This is guaranteed not to be the last io reference count
> -		 * because the caller (xfs_buf_submit) holds a count itself.
> -		 */
> -		atomic_dec(&bp->b_io_remaining);
> -		xfs_buf_ioerror(bp, -EIO);
> -		bio_put(bio);
> -	}
> -
> +	do {
> +		void		*data = bp->b_addr + *buf_offset;
> +		struct folio	*folio = kmem_to_folio(data);
> +		unsigned int	off = offset_in_folio(folio, data);
> +		unsigned int	len = min_t(unsigned int, size,
> +						folio_size(folio) - off);
> +
> +		bio_add_folio_nofail(bio, folio, len, off);
> +		size -= len;
> +		*buf_offset += len;
> +	} while (size);
> +
> +	if (xfs_buf_is_vmapped(bp))
> +		flush_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
> +	submit_bio(bio);
>  }
> 
>  STATIC void
> @@ -1638,8 +1594,7 @@ _xfs_buf_ioapply(
>  {
>  	struct blk_plug	plug;
>  	blk_opf_t	op;
> -	int		offset;
> -	int		size;
> +	unsigned int	offset = 0;
>  	int		i;
> 
>  	/*
> @@ -1701,16 +1656,9 @@ _xfs_buf_ioapply(
>  	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
>  	 * subsequent call.
>  	 */
> -	offset = bp->b_offset;

Huh. So ... where does b_offset come into play here?

OH. Since we're starting with b_addr and working our way /back/ to
folios, we don't need b_offset anymore since we can compute that from
(b_addr - folio_address()).

So then the @offset variable in _xfs_buf_ioapply is really a cursor into
how far into the xfs_buf we've ioapply'd.
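(To make that concrete -- this is only a sketch, nothing that needs to go
in the patch, and the helper name is made up:)

	static inline unsigned int
	xfs_buf_kmem_offset(
		struct xfs_buf	*bp)
	{
		struct folio	*folio = kmem_to_folio(bp->b_addr);

		/* the value the old bp->b_offset used to cache */
		return offset_in_folio(folio, bp->b_addr);
	}

and offset_in_folio(folio, b_addr) is just b_addr - folio_address(folio)
for a naturally aligned folio, so there's nothing left worth caching in
the buffer.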
Would you mind adding a sentence to the commit message?

"Instead of walking the folio array just walk the kernel virtual
address in ->b_addr. This prepares for using vmalloc for buffers and
removing the b_folio array. Furthermore, b_offset goes away since we
can compute that from b_addr and the folio."

With that changed,
Reviewed-by: Darrick J. Wong <djwong@xxxxxxxxxx>

--D

> -	size = BBTOB(bp->b_length);
>  	blk_start_plug(&plug);
> -	for (i = 0; i < bp->b_map_count; i++) {
> -		xfs_buf_ioapply_map(bp, i, &offset, &size, op);
> -		if (bp->b_error)
> -			break;
> -		if (size <= 0)
> -			break;	/* all done */
> -	}
> +	for (i = 0; i < bp->b_map_count; i++)
> +		xfs_buf_ioapply_map(bp, i, &offset, op);
>  	blk_finish_plug(&plug);
>  }
> 
> diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
> index aef7015cf9f3..4d515407713b 100644
> --- a/fs/xfs/xfs_buf.h
> +++ b/fs/xfs/xfs_buf.h
> @@ -198,8 +198,6 @@ struct xfs_buf {
>  	atomic_t		b_pin_count;	/* pin count */
>  	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
>  	unsigned int		b_folio_count;	/* size of folio array */
> -	unsigned int		b_offset;	/* page offset of b_addr,
> -						   only for _XBF_KMEM buffers */
>  	int			b_error;	/* error code on I/O */
> 
>  	/*
> -- 
> 2.43.0
> 
> 