The iomap ioend mechanism has the ability to construct very large,
contiguous bios and/or bio chains. This has been reported to lead to
soft lockup warnings in bio completion due to the amount of page
processing that occurs. Update the ioend completion path with a
parameter to indicate atomic context and insert a cond_resched()
call to avoid soft lockups in either scenario.

Signed-off-by: Brian Foster <bfoster@xxxxxxxxxx>
---
 fs/iomap/buffered-io.c | 15 +++++++++------
 fs/xfs/xfs_aops.c      |  2 +-
 include/linux/iomap.h  |  2 +-
 3 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 414769a6ad11..642422775e4e 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1061,7 +1061,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
  * ioend after this.
  */
 static void
-iomap_finish_ioend(struct iomap_ioend *ioend, int error)
+iomap_finish_ioend(struct iomap_ioend *ioend, int error, bool atomic)
 {
 	struct inode *inode = ioend->io_inode;
 	struct bio *bio = &ioend->io_inline_bio;
@@ -1084,9 +1084,12 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
 		next = bio->bi_private;
 
 		/* walk each page on bio, ending page IO on them */
-		bio_for_each_segment_all(bv, bio, iter_all)
+		bio_for_each_segment_all(bv, bio, iter_all) {
 			iomap_finish_page_writeback(inode, bv->bv_page, error,
 					bv->bv_len);
+			if (!atomic)
+				cond_resched();
+		}
 		bio_put(bio);
 	}
 	/* The ioend has been freed by bio_put() */
@@ -1099,17 +1102,17 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
 }
 
 void
-iomap_finish_ioends(struct iomap_ioend *ioend, int error)
+iomap_finish_ioends(struct iomap_ioend *ioend, int error, bool atomic)
 {
 	struct list_head tmp;
 
 	list_replace_init(&ioend->io_list, &tmp);
-	iomap_finish_ioend(ioend, error);
+	iomap_finish_ioend(ioend, error, atomic);
 
 	while (!list_empty(&tmp)) {
 		ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
 		list_del_init(&ioend->io_list);
-		iomap_finish_ioend(ioend, error);
+		iomap_finish_ioend(ioend, error, atomic);
 	}
 }
 EXPORT_SYMBOL_GPL(iomap_finish_ioends);
@@ -1178,7 +1181,7 @@ static void
 iomap_writepage_end_bio(struct bio *bio)
 {
 	struct iomap_ioend *ioend = bio->bi_private;
-	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status));
+	iomap_finish_ioend(ioend, blk_status_to_errno(bio->bi_status), true);
 }
 
 /*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 9b08db45ce85..84cd6cf46b12 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -123,7 +123,7 @@ xfs_end_ioend(
 	if (!error && xfs_ioend_is_append(ioend))
 		error = xfs_setfilesize(ip, ioend->io_offset, ioend->io_size);
 done:
-	iomap_finish_ioends(ioend, error);
+	iomap_finish_ioends(ioend, error, false);
 	memalloc_nofs_restore(nofs_flag);
 }
 
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index d202fd2d0f91..07f3f4e69084 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -232,7 +232,7 @@ struct iomap_writepage_ctx {
 	const struct iomap_writeback_ops *ops;
 };
 
-void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error, bool atomic);
 void iomap_ioend_try_merge(struct iomap_ioend *ioend,
 		struct list_head *more_ioends,
 		void (*merge_private)(struct iomap_ioend *ioend,
-- 
2.26.3