This patch first renames the iop->uptodate bitmap to iop->state, because later
patches will add a dirty state to the same bitmap, so iop->state better
describes its contents. It also adds helpers for handling the uptodate state
within iop->state. No functional change in this patch.

Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@xxxxxxxxx>
---
 fs/iomap/buffered-io.c | 78 +++++++++++++++++++++++++++++++-----------
 1 file changed, 58 insertions(+), 20 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index e732581dc2d4..5103b644e115 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -24,14 +24,14 @@
 #define IOEND_BATCH_SIZE	4096
 
 /*
- * Structure allocated for each folio when block size < folio size
- * to track sub-folio uptodate status and I/O completions.
+ * Structure allocated for each folio to track per-block uptodate state
+ * and I/O completions.
  */
 struct iomap_page {
 	atomic_t		read_bytes_pending;
 	atomic_t		write_bytes_pending;
-	spinlock_t		uptodate_lock;
-	unsigned long		uptodate[];
+	spinlock_t		state_lock;
+	unsigned long		state[];
 };
 
 static inline struct iomap_page *to_iomap_page(struct folio *folio)
@@ -43,6 +43,47 @@ static inline struct iomap_page *to_iomap_page(struct folio *folio)
 
 static struct bio_set iomap_ioend_bioset;
 
+/*
+ * inline helpers for bitmap operations on iop->state
+ */
+static inline void iop_set_range(struct iomap_page *iop, unsigned int start_blk,
+		unsigned int nr_blks)
+{
+	bitmap_set(iop->state, start_blk, nr_blks);
+}
+
+static inline bool iop_test_block(struct iomap_page *iop, unsigned int block)
+{
+	return test_bit(block, iop->state);
+}
+
+static inline bool iop_bitmap_full(struct iomap_page *iop,
+		unsigned int blks_per_folio)
+{
+	return bitmap_full(iop->state, blks_per_folio);
+}
+
+/*
+ * iop related helpers for checking uptodate/dirty state of per-block
+ * or range of blocks within a folio
+ */
+static bool iop_test_full_uptodate(struct folio *folio)
+{
+	struct iomap_page *iop = to_iomap_page(folio);
+	struct inode *inode = folio->mapping->host;
+
+	WARN_ON(!iop);
+	return iop_bitmap_full(iop, i_blocks_per_folio(inode, folio));
+}
+
+static bool iop_test_block_uptodate(struct folio *folio, unsigned int block)
+{
+	struct iomap_page *iop = to_iomap_page(folio);
+
+	WARN_ON(!iop);
+	return iop_test_block(iop, block);
+}
+
 static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
 		size_t off, size_t len)
 {
@@ -53,12 +94,11 @@ static void iop_set_range_uptodate(struct inode *inode, struct folio *folio,
 	unsigned long flags;
 
 	if (iop) {
-		spin_lock_irqsave(&iop->uptodate_lock, flags);
-		bitmap_set(iop->uptodate, first_blk, nr_blks);
-		if (bitmap_full(iop->uptodate,
-				i_blocks_per_folio(inode, folio)))
+		spin_lock_irqsave(&iop->state_lock, flags);
+		iop_set_range(iop, first_blk, nr_blks);
+		if (iop_test_full_uptodate(folio))
 			folio_mark_uptodate(folio);
-		spin_unlock_irqrestore(&iop->uptodate_lock, flags);
+		spin_unlock_irqrestore(&iop->state_lock, flags);
 	} else {
 		folio_mark_uptodate(folio);
 	}
@@ -79,12 +119,12 @@ static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
 	else
 		gfp = GFP_NOFS | __GFP_NOFAIL;
 
-	iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
+	iop = kzalloc(struct_size(iop, state, BITS_TO_LONGS(nr_blocks)),
 		      gfp);
 	if (iop) {
-		spin_lock_init(&iop->uptodate_lock);
+		spin_lock_init(&iop->state_lock);
 		if (folio_test_uptodate(folio))
-			bitmap_fill(iop->uptodate, nr_blocks);
+			iop_set_range(iop, 0, nr_blocks);
 		folio_attach_private(folio, iop);
 	}
 	return iop;
@@ -93,15 +133,13 @@ static struct iomap_page *iop_alloc(struct inode *inode, struct folio *folio,
 static void iop_free(struct folio *folio)
 {
 	struct iomap_page *iop = to_iomap_page(folio);
-	struct inode *inode = folio->mapping->host;
-	unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
 
 	if (!iop)
 		return;
 	WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
 	WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
-	WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
-			folio_test_uptodate(folio));
+	WARN_ON_ONCE(iop_test_full_uptodate(folio) !=
+			folio_test_uptodate(folio));
 	folio_detach_private(folio);
 	kfree(iop);
 }
@@ -132,7 +170,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 
 		/* move forward for each leading block marked uptodate */
 		for (i = first; i <= last; i++) {
-			if (!test_bit(i, iop->uptodate))
+			if (!iop_test_block_uptodate(folio, i))
 				break;
 			*pos += block_size;
 			poff += block_size;
@@ -142,7 +180,7 @@ static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
 
 		/* truncate len if we find any trailing uptodate block(s) */
 		for ( ; i <= last; i++) {
-			if (test_bit(i, iop->uptodate)) {
+			if (iop_test_block_uptodate(folio, i)) {
 				plen -= (last - i + 1) * block_size;
 				last = i - 1;
 				break;
@@ -450,7 +488,7 @@ bool iomap_is_partially_uptodate(struct folio *folio, size_t from, size_t count)
 	last = (from + count - 1) >> inode->i_blkbits;
 
 	for (i = first; i <= last; i++)
-		if (!test_bit(i, iop->uptodate))
+		if (!iop_test_block_uptodate(folio, i))
 			return false;
 	return true;
 }
@@ -1634,7 +1672,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
 	 * invalid, grab a new one.
 	 */
 	for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
-		if (iop && !test_bit(i, iop->uptodate))
+		if (iop && !iop_test_block_uptodate(folio, i))
 			continue;
 
 		error = wpc->ops->map_blocks(wpc, inode, pos);
-- 
2.39.2
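
For readers new to this code: the helpers introduced above are thin wrappers
around the kernel bitmap API (bitmap_set(), test_bit(), bitmap_full()). Below
is a minimal userspace-style sketch of the same per-block bookkeeping idea,
for illustration only; it is not part of the patch, and the demo_* names and
the BLOCKS_PER_FOLIO value are made up for the example.

/* Illustrative sketch only: per-block "uptodate" tracking with a bitmap. */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define BLOCKS_PER_FOLIO 16	/* e.g. a 64K folio with 4K blocks */
#define DEMO_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define DEMO_BITMAP_LONGS \
	((BLOCKS_PER_FOLIO + DEMO_BITS_PER_LONG - 1) / DEMO_BITS_PER_LONG)

struct demo_iop {
	unsigned long state[DEMO_BITMAP_LONGS];	/* bit i: block i uptodate */
};

/* analogue of iop_set_range(): mark a range of blocks uptodate */
static void demo_set_range(struct demo_iop *iop, unsigned int start,
			   unsigned int nr)
{
	for (unsigned int i = start; i < start + nr; i++)
		iop->state[i / DEMO_BITS_PER_LONG] |=
			1UL << (i % DEMO_BITS_PER_LONG);
}

/* analogue of iop_test_block(): is this block uptodate? */
static bool demo_test_block(struct demo_iop *iop, unsigned int block)
{
	return iop->state[block / DEMO_BITS_PER_LONG] &
			(1UL << (block % DEMO_BITS_PER_LONG));
}

/* analogue of iop_bitmap_full(): are all blocks in the folio uptodate? */
static bool demo_full(struct demo_iop *iop)
{
	for (unsigned int i = 0; i < BLOCKS_PER_FOLIO; i++)
		if (!demo_test_block(iop, i))
			return false;
	return true;
}

int main(void)
{
	struct demo_iop iop = { { 0 } };

	demo_set_range(&iop, 0, 8);	/* first half of the folio read in */
	printf("block 3 uptodate: %d, whole folio uptodate: %d\n",
	       demo_test_block(&iop, 3), demo_full(&iop));

	demo_set_range(&iop, 8, 8);	/* remaining blocks read in */
	printf("whole folio uptodate: %d\n", demo_full(&iop));
	return 0;
}

The kernel version additionally serializes bitmap updates with
iop->state_lock and only calls folio_mark_uptodate() once every block bit is
set, which is exactly what iop_set_range_uptodate() does in the patch above.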