Now that we have a migrate_folio method, there is no need for a
writepage method.  All writeback will go through the writepages
method instead, which is more efficient.

Signed-off-by: Matthew Wilcox (Oracle) <willy@xxxxxxxxxxxxx>
---
 fs/ceph/addr.c | 25 -------------------------
 1 file changed, 25 deletions(-)

diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index a0a1fac1a0db..785f2983ac0e 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -795,30 +795,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	return err;
 }
 
-static int ceph_writepage(struct page *page, struct writeback_control *wbc)
-{
-	int err;
-	struct inode *inode = page->mapping->host;
-	BUG_ON(!inode);
-	ihold(inode);
-
-	if (wbc->sync_mode == WB_SYNC_NONE &&
-	    ceph_inode_to_client(inode)->write_congested)
-		return AOP_WRITEPAGE_ACTIVATE;
-
-	wait_on_page_fscache(page);
-
-	err = writepage_nounlock(page, wbc);
-	if (err == -ERESTARTSYS) {
-		/* direct memory reclaimer was killed by SIGKILL. return 0
-		 * to prevent caller from setting mapping/page error */
-		err = 0;
-	}
-	unlock_page(page);
-	iput(inode);
-	return err;
-}
-
 /*
  * async writeback completion handler.
  *
@@ -1555,7 +1531,6 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
 const struct address_space_operations ceph_aops = {
 	.read_folio = netfs_read_folio,
 	.readahead = netfs_readahead,
-	.writepage = ceph_writepage,
 	.writepages = ceph_writepages_start,
 	.write_begin = ceph_write_begin,
 	.write_end = ceph_write_end,
-- 
2.40.1
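
For readers not familiar with the writeback path: the claim that "all writeback
will go through the writepages method" follows from how the VM dispatches
writeback on an address_space.  Below is a simplified sketch of that dispatch,
not the literal mm/page-writeback.c code; error handling and block plugging
are omitted, and write_each_dirty_folio() is a hypothetical stand-in for the
kernel's per-page fallback.  Because ->writepages is always preferred when it
is implemented, ceph only needs ceph_writepages_start, and ceph_writepage is
never reached on the ordinary writeback path.

	#include <linux/fs.h>
	#include <linux/writeback.h>

	/* Sketch only: how writeback chooses between the two methods. */
	static int do_writepages_sketch(struct address_space *mapping,
					struct writeback_control *wbc)
	{
		/* Preferred path: write back many folios in one call. */
		if (mapping->a_ops->writepages)
			return mapping->a_ops->writepages(mapping, wbc);

		/* Legacy path: one ->writepage call per dirty folio
		 * (write_each_dirty_folio() is a hypothetical helper). */
		if (mapping->a_ops->writepage)
			return write_each_dirty_folio(mapping, wbc);

		/* No writeback method, e.g. special mappings. */
		return 0;
	}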