[RFC PATCH 07/10] iomap: Reorder functions

Move the ioend creation functions earlier in the file so write_end can
create ioends without requiring forward declarations.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
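(Reviewer note, not part of the commit message: the forward declaration that this
reordering avoids would look roughly like the prototype below. The caller that
needs it, assumed here to be the write_end path, is only introduced by a later
patch in this series, so the prototype is purely illustrative.)

	/* hypothetical forward declaration made unnecessary by this move */
	static struct iomap_ioend *iomap_add_to_ioend(struct inode *inode,
			loff_t pos, size_t len, struct folio *folio,
			struct iomap_page *iop, struct iomap *iomap,
			struct iomap_ioend *ioend, struct writeback_control *wbc,
			struct list_head *iolist);
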
 fs/iomap/buffered-io.c | 215 ++++++++++++++++++++---------------------
 1 file changed, 107 insertions(+), 108 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 5b69cea71f71..4aa2209fb003 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -558,6 +558,113 @@ static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
 	return submit_bio_wait(&bio);
 }
 
+static bool iomap_can_add_to_ioend(struct iomap *iomap,
+		struct iomap_ioend *ioend, loff_t offset, sector_t sector)
+{
+	if ((iomap->flags & IOMAP_F_SHARED) !=
+	    (ioend->io_flags & IOMAP_F_SHARED))
+		return false;
+	if (iomap->type != ioend->io_type)
+		return false;
+	if (offset != ioend->io_offset + ioend->io_size)
+		return false;
+	if (sector != bio_end_sector(ioend->io_bio))
+		return false;
+	/*
+	 * Limit ioend bio chain lengths to minimise IO completion latency. This
+	 * also prevents long tight loops ending page writeback on all the
+	 * folios in the ioend.
+	 */
+	if (ioend->io_folios >= IOEND_BATCH_SIZE)
+		return false;
+	return true;
+}
+
+static struct iomap_ioend *iomap_alloc_ioend(struct inode *inode,
+		struct iomap *iomap, loff_t offset, sector_t sector,
+		struct writeback_control *wbc)
+{
+	struct iomap_ioend *ioend;
+	struct bio *bio;
+
+	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
+	bio_set_dev(bio, iomap->bdev);
+	bio->bi_iter.bi_sector = sector;
+	bio->bi_opf = REQ_OP_WRITE;
+	bio->bi_write_hint = inode->i_write_hint;
+
+	if (wbc) {
+		bio->bi_opf |= wbc_to_write_flags(wbc);
+		wbc_init_bio(wbc, bio);
+	}
+
+	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
+	INIT_LIST_HEAD(&ioend->io_list);
+	ioend->io_type = iomap->type;
+	ioend->io_flags = iomap->flags;
+	ioend->io_inode = inode;
+	ioend->io_size = 0;
+	ioend->io_folios = 0;
+	ioend->io_offset = offset;
+	ioend->io_bio = bio;
+	ioend->io_sector = sector;
+	return ioend;
+}
+
+/*
+ * Allocate a new bio, and chain the old bio to the new one.
+ *
+ * Note that we have to perform the chaining in this unintuitive order
+ * so that the bi_private linkage is set up in the right direction for the
+ * traversal in iomap_finish_ioend().
+ */
+static struct bio *iomap_chain_bio(struct bio *prev)
+{
+	struct bio *new;
+
+	new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
+	bio_copy_dev(new, prev);/* also copies over blkcg information */
+	new->bi_iter.bi_sector = bio_end_sector(prev);
+	new->bi_opf = prev->bi_opf;
+	new->bi_write_hint = prev->bi_write_hint;
+
+	bio_chain(prev, new);
+	bio_get(prev);		/* for iomap_finish_ioend */
+	submit_bio(prev);
+	return new;
+}
+
+/*
+ * Test to see if we have an existing ioend structure that we could append to
+ * first; otherwise finish off the current ioend and start another.
+ */
+static struct iomap_ioend *iomap_add_to_ioend(struct inode *inode,
+		loff_t pos, size_t len, struct folio *folio,
+		struct iomap_page *iop, struct iomap *iomap,
+		struct iomap_ioend *ioend, struct writeback_control *wbc,
+		struct list_head *iolist)
+{
+	sector_t sector = iomap_sector(iomap, pos);
+	size_t poff = offset_in_folio(folio, pos);
+
+	if (!ioend || !iomap_can_add_to_ioend(iomap, ioend, pos, sector)) {
+		if (ioend)
+			list_add(&ioend->io_list, iolist);
+		ioend = iomap_alloc_ioend(inode, iomap, pos, sector, wbc);
+	}
+
+	if (!bio_add_folio(ioend->io_bio, folio, len, poff)) {
+		ioend->io_bio = iomap_chain_bio(ioend->io_bio);
+		bio_add_folio(ioend->io_bio, folio, len, poff);
+	}
+
+	if (iop)
+		atomic_add(len, &iop->write_bytes_pending);
+	ioend->io_size += len;
+	wbc_account_cgroup_owner(wbc, &folio->page, len);
+	return ioend;
+}
+
 static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
 		size_t len, struct folio *folio)
 {
@@ -1222,114 +1329,6 @@ iomap_submit_ioend(struct iomap_writepage_ctx *wpc, struct iomap_ioend *ioend,
 	return 0;
 }
 
-static struct iomap_ioend *iomap_alloc_ioend(struct inode *inode,
-		struct iomap *iomap, loff_t offset, sector_t sector,
-		struct writeback_control *wbc)
-{
-	struct iomap_ioend *ioend;
-	struct bio *bio;
-
-	bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS, &iomap_ioend_bioset);
-	bio_set_dev(bio, iomap->bdev);
-	bio->bi_iter.bi_sector = sector;
-	bio->bi_opf = REQ_OP_WRITE;
-	bio->bi_write_hint = inode->i_write_hint;
-
-	if (wbc) {
-		bio->bi_opf |= wbc_to_write_flags(wbc);
-		wbc_init_bio(wbc, bio);
-	}
-
-	ioend = container_of(bio, struct iomap_ioend, io_inline_bio);
-	INIT_LIST_HEAD(&ioend->io_list);
-	ioend->io_type = iomap->type;
-	ioend->io_flags = iomap->flags;
-	ioend->io_inode = inode;
-	ioend->io_size = 0;
-	ioend->io_folios = 0;
-	ioend->io_offset = offset;
-	ioend->io_bio = bio;
-	ioend->io_sector = sector;
-	return ioend;
-}
-
-/*
- * Allocate a new bio, and chain the old bio to the new one.
- *
- * Note that we have to perform the chaining in this unintuitive order
- * so that the bi_private linkage is set up in the right direction for the
- * traversal in iomap_finish_ioend().
- */
-static struct bio *
-iomap_chain_bio(struct bio *prev)
-{
-	struct bio *new;
-
-	new = bio_alloc(GFP_NOFS, BIO_MAX_VECS);
-	bio_copy_dev(new, prev);/* also copies over blkcg information */
-	new->bi_iter.bi_sector = bio_end_sector(prev);
-	new->bi_opf = prev->bi_opf;
-	new->bi_write_hint = prev->bi_write_hint;
-
-	bio_chain(prev, new);
-	bio_get(prev);		/* for iomap_finish_ioend */
-	submit_bio(prev);
-	return new;
-}
-
-static bool iomap_can_add_to_ioend(struct iomap *iomap,
-		struct iomap_ioend *ioend, loff_t offset, sector_t sector)
-{
-	if ((iomap->flags & IOMAP_F_SHARED) !=
-	    (ioend->io_flags & IOMAP_F_SHARED))
-		return false;
-	if (iomap->type != ioend->io_type)
-		return false;
-	if (offset != ioend->io_offset + ioend->io_size)
-		return false;
-	if (sector != bio_end_sector(ioend->io_bio))
-		return false;
-	/*
-	 * Limit ioend bio chain lengths to minimise IO completion latency. This
-	 * also prevents long tight loops ending page writeback on all the
-	 * folios in the ioend.
-	 */
-	if (ioend->io_folios >= IOEND_BATCH_SIZE)
-		return false;
-	return true;
-}
-
-/*
- * Test to see if we have an existing ioend structure that we could append to
- * first; otherwise finish off the current ioend and start another.
- */
-static struct iomap_ioend *iomap_add_to_ioend(struct inode *inode,
-		loff_t pos, size_t len, struct folio *folio,
-		struct iomap_page *iop, struct iomap *iomap,
-		struct iomap_ioend *ioend, struct writeback_control *wbc,
-		struct list_head *iolist)
-{
-	sector_t sector = iomap_sector(iomap, pos);
-	size_t poff = offset_in_folio(folio, pos);
-
-	if (!ioend || !iomap_can_add_to_ioend(iomap, ioend, pos, sector)) {
-		if (ioend)
-			list_add(&ioend->io_list, iolist);
-		ioend = iomap_alloc_ioend(inode, iomap, pos, sector, wbc);
-	}
-
-	if (!bio_add_folio(ioend->io_bio, folio, len, poff)) {
-		ioend->io_bio = iomap_chain_bio(ioend->io_bio);
-		bio_add_folio(ioend->io_bio, folio, len, poff);
-	}
-
-	if (iop)
-		atomic_add(len, &iop->write_bytes_pending);
-	ioend->io_size += len;
-	wbc_account_cgroup_owner(wbc, &folio->page, len);
-	return ioend;
-}
-
 /*
  * We implement an immediate ioend submission policy here to avoid needing to
  * chain multiple ioends and hence nest mempool allocations which can violate
-- 
2.34.1
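
(Reader's sketch, not part of the patch: the moved helpers implement an
"append or start a new ioend" policy, so a writeback-style caller would drive
iomap_add_to_ioend() roughly as below. example_map_blocks() is a hypothetical
name, and the per-block mapping lookup that real writeback performs through the
filesystem is omitted; only the looping over one folio's blocks is shown.)

	/*
	 * Simplified caller sketch: walk the blocks of a folio and hand each
	 * one to iomap_add_to_ioend(), which either appends to the open ioend
	 * (when iomap_can_add_to_ioend() says the block is contiguous and the
	 * batch limit is not hit) or puts the old ioend on submit_list and
	 * allocates a fresh one.  Holes/unwritten blocks and the per-block
	 * ->map_blocks lookup are deliberately left out.
	 */
	static struct iomap_ioend *example_map_blocks(struct inode *inode,
			struct folio *folio, struct iomap_page *iop,
			struct iomap *iomap, struct iomap_ioend *ioend,
			struct writeback_control *wbc,
			struct list_head *submit_list)
	{
		loff_t pos = folio_pos(folio);
		loff_t end = pos + folio_size(folio);

		for (; pos < end; pos += i_blocksize(inode))
			ioend = iomap_add_to_ioend(inode, pos,
					i_blocksize(inode), folio, iop,
					iomap, ioend, wbc, submit_list);

		return ioend;
	}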