Rather than check the result of test-and-clear, just check that we have
the writeback bit set at the start.  This wouldn't catch every case, but
it's good enough (and enables the next patch).

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 mm/filemap.c        |  9 +++++++--
 mm/internal.h       |  2 +-
 mm/page-writeback.c | 38 ++++++++++++++++----------------------
 3 files changed, 24 insertions(+), 25 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 8262b85593be..53c0d71aae8e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1595,9 +1595,15 @@ EXPORT_SYMBOL(folio_wait_private_2_killable);
 /**
  * folio_end_writeback - End writeback against a folio.
  * @folio: The folio.
+ *
+ * The folio must actually be under writeback.
+ *
+ * Context: May be called from process or interrupt context.
  */
 void folio_end_writeback(struct folio *folio)
 {
+	VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);
+
 	/*
 	 * folio_test_clear_reclaim() could be used here but it is an
 	 * atomic operation and overkill in this particular case. Failing
@@ -1617,8 +1623,7 @@ void folio_end_writeback(struct folio *folio)
 	 * reused before the folio_wake().
 	 */
 	folio_get(folio);
-	if (!__folio_end_writeback(folio))
-		BUG();
+	__folio_end_writeback(folio);
 
 	smp_mb__after_atomic();
 	folio_wake(folio, PG_writeback);
diff --git a/mm/internal.h b/mm/internal.h
index 30cf724ddbce..ccb08dd9b5ec 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -105,7 +105,7 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
-bool __folio_end_writeback(struct folio *folio);
+void __folio_end_writeback(struct folio *folio);
 void deactivate_file_folio(struct folio *folio);
 void folio_activate(struct folio *folio);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b8d3d7040a50..410b53e888e3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2940,11 +2940,10 @@ static void wb_inode_writeback_end(struct bdi_writeback *wb)
 	spin_unlock_irqrestore(&wb->work_lock, flags);
 }
 
-bool __folio_end_writeback(struct folio *folio)
+void __folio_end_writeback(struct folio *folio)
 {
 	long nr = folio_nr_pages(folio);
 	struct address_space *mapping = folio_mapping(folio);
-	bool ret;
 
 	folio_memcg_lock(folio);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
@@ -2953,19 +2952,16 @@
 		unsigned long flags;
 
 		xa_lock_irqsave(&mapping->i_pages, flags);
-		ret = folio_test_clear_writeback(folio);
-		if (ret) {
-			__xa_clear_mark(&mapping->i_pages, folio_index(folio),
-						PAGECACHE_TAG_WRITEBACK);
-			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-				struct bdi_writeback *wb = inode_to_wb(inode);
-
-				wb_stat_mod(wb, WB_WRITEBACK, -nr);
-				__wb_writeout_add(wb, nr);
-				if (!mapping_tagged(mapping,
-						PAGECACHE_TAG_WRITEBACK))
-					wb_inode_writeback_end(wb);
-			}
+		folio_test_clear_writeback(folio);
+		__xa_clear_mark(&mapping->i_pages, folio_index(folio),
+					PAGECACHE_TAG_WRITEBACK);
+		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+			struct bdi_writeback *wb = inode_to_wb(inode);
+
+			wb_stat_mod(wb, WB_WRITEBACK, -nr);
+			__wb_writeout_add(wb, nr);
+			if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+				wb_inode_writeback_end(wb);
 		}
 
 		if (mapping->host && !mapping_tagged(mapping,
@@ -2974,15 +2970,13 @@
 
 		xa_unlock_irqrestore(&mapping->i_pages, flags);
 	} else {
-		ret = folio_test_clear_writeback(folio);
-	}
-	if (ret) {
-		lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
-		zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
-		node_stat_mod_folio(folio, NR_WRITTEN, nr);
+		folio_test_clear_writeback(folio);
 	}
+
+	lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+	node_stat_mod_folio(folio, NR_WRITTEN, nr);
 	folio_memcg_unlock(folio);
-	return ret;
 }
 
 bool __folio_start_writeback(struct folio *folio, bool keep_write)
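
To illustrate the contract the new assertion enforces, here is a sketch
of a typical caller (simplified and hypothetical; my_fs_end_write_bio()
is an invented name, not code from this patch).  folio_start_writeback()
sets PG_writeback before the I/O is submitted, and nothing but the
completion path clears it, so the bit must still be set on entry to
folio_end_writeback().  A caller that never started writeback trips the
assertion immediately; a racing double call could still slip past the
entry check, which is why it "wouldn't catch every case":

	/*
	 * Hypothetical bio completion handler, for illustration only;
	 * the function name and error handling are invented and are
	 * not part of this patch.  PG_writeback was set by
	 * folio_start_writeback() before the bio was submitted, so
	 * the VM_BUG_ON_FOLIO() at the top of folio_end_writeback()
	 * cannot fire here.
	 */
	static void my_fs_end_write_bio(struct bio *bio)
	{
		struct folio *folio = bio_first_folio_all(bio);

		if (bio->bi_status)
			mapping_set_error(folio->mapping, -EIO);

		/* Clears PG_writeback; wakes folio_wait_writeback() waiters. */
		folio_end_writeback(folio);
		bio_put(bio);
	}

With the bool return gone, the statistics updates in
__folio_end_writeback() run unconditionally, so the assertion turns a
contract violation that would silently skew writeback accounting into
an immediate splat on CONFIG_DEBUG_VM builds.
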
-- 
2.40.1