Hi Stephen,

Please include a new tree in linux-next:

https://git.infradead.org/users/willy/pagecache.git/shortlog/refs/heads/for-next
aka git://git.infradead.org/users/willy/pagecache.git for-next

There are some minor conflicts with mmotm.  I resolved some of them by
pulling in three patches from mmotm and rebasing on top of them.  These
conflicts (or near-misses) still remain, and I'm showing my resolution:

+++ b/arch/arm/include/asm/cacheflush.h
@@@ -290,8 -290,8 +290,9 @@@ extern void flush_cache_page(struct vm_
   */
  #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
  extern void flush_dcache_page(struct page *);
+ void flush_dcache_folio(struct folio *folio);
  
 +#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
  static inline void flush_kernel_vmap_range(void *addr, int size)
  {
  	if ((cache_is_vivt() || cache_is_vipt_aliasing()))

+++ b/mm/filemap.c
@@@ -836,9 -833,9 +838,9 @@@ void replace_page_cache_page(struct pag
  
  	new->mapping = mapping;
  	new->index = offset;
  
- 	mem_cgroup_migrate(old, new);
+ 	mem_cgroup_migrate(fold, fnew);
  
- 	xas_lock_irqsave(&xas, flags);
+ 	xas_lock_irq(&xas);
  	xas_store(&xas, new);
  	old->mapping = NULL;

diff --cc mm/page-writeback.c
index 57b98ea365e2,c2987f05c944..96b69365de65
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@@ -2739,34 -2751,17 +2763,35 @@@ bool folio_clear_dirty_for_io(struct fo
  		unlocked_inode_to_wb_end(inode, &cookie);
  		return ret;
  	}
- 	return TestClearPageDirty(page);
+ 	return folio_test_clear_dirty(folio);
  }
- EXPORT_SYMBOL(clear_page_dirty_for_io);
+ EXPORT_SYMBOL(folio_clear_dirty_for_io);
  
 +static void wb_inode_writeback_start(struct bdi_writeback *wb)
 +{
 +	atomic_inc(&wb->writeback_inodes);
 +}
 +
 +static void wb_inode_writeback_end(struct bdi_writeback *wb)
 +{
 +	atomic_dec(&wb->writeback_inodes);
 +	/*
 +	 * Make sure estimate of writeback throughput gets updated after
 +	 * writeback completed. We delay the update by BANDWIDTH_INTERVAL
 +	 * (which is the interval other bandwidth updates use for batching) so
 +	 * that if multiple inodes end writeback at a similar time, they get
 +	 * batched into one bandwidth update.
 +	 */
 +	queue_delayed_work(bdi_wq, &wb->bw_dwork, BANDWIDTH_INTERVAL);
 +}
 +
- int test_clear_page_writeback(struct page *page)
+ bool __folio_end_writeback(struct folio *folio)
  {
- 	struct address_space *mapping = page_mapping(page);
- 	int ret;
+ 	long nr = folio_nr_pages(folio);
+ 	struct address_space *mapping = folio_mapping(folio);
+ 	bool ret;
  
- 	lock_page_memcg(page);
+ 	folio_memcg_lock(folio);
  	if (mapping && mapping_use_writeback_tags(mapping)) {
  		struct inode *inode = mapping->host;
  		struct backing_dev_info *bdi = inode_to_bdi(inode);
@@@ -2780,11 -2775,8 +2805,11 @@@
  		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
  			struct bdi_writeback *wb = inode_to_wb(inode);
  
- 			dec_wb_stat(wb, WB_WRITEBACK);
- 			__wb_writeout_inc(wb);
+ 			wb_stat_mod(wb, WB_WRITEBACK, -nr);
+ 			__wb_writeout_add(wb, nr);
 +			if (!mapping_tagged(mapping,
 +						PAGECACHE_TAG_WRITEBACK))
 +				wb_inode_writeback_end(wb);
  		}
  	}
  
@@@ -2827,18 -2821,14 +2854,18 @@@ bool __folio_start_writeback(struct fol
  						PAGECACHE_TAG_WRITEBACK);
  
  			xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
 -			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT)
 -				wb_stat_mod(inode_to_wb(inode), WB_WRITEBACK,
 -						nr);
 +			if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
 +				struct bdi_writeback *wb = inode_to_wb(inode);
 +
- 				inc_wb_stat(wb, WB_WRITEBACK);
++				wb_stat_mod(wb, WB_WRITEBACK, nr);
 +				if (!on_wblist)
 +					wb_inode_writeback_start(wb);
 +			}
  
  			/*
- 			 * We can come through here when swapping anonymous
- 			 * pages, so we don't necessarily have an inode to track
- 			 * for sync.
+ 			 * We can come through here when swapping
+ 			 * anonymous folios, so we don't necessarily
+ 			 * have an inode to track for sync.
  			 */
  			if (mapping->host && !on_wblist)
  				sb_mark_inode_writeback(mapping->host);
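For anyone wondering what the flush_dcache_folio() declaration in the first
hunk amounts to: a folio is a physically contiguous run of pages, so an
architecture that only provides a per-page flush_dcache_page() can fall back
to flushing each constituent page.  A minimal sketch of such a fallback,
assuming only flush_dcache_page() is available (illustrative only, not the
ARM implementation from the tree above):

#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * Illustrative generic fallback: a folio's pages are physically
 * contiguous, so flush the data cache one page at a time.
 */
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}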