When paging out a dirty page, try to piggyback more consecutive dirty
pages (up to 512KB) to improve I/O efficiency. Only ext3/reiserfs, which
don't have their own aops->writepages, are supported in this initial
version.

CC: Dave Chinner <david@xxxxxxxxxxxxx>
Signed-off-by: Wu Fengguang <fengguang.wu@xxxxxxxxx>
---
 mm/page-writeback.c |   12 ++++++++++++
 mm/vmscan.c         |   11 +++++++++++
 2 files changed, 23 insertions(+)

--- linux.orig/mm/vmscan.c	2009-10-06 23:37:39.000000000 +0800
+++ linux/mm/vmscan.c	2009-10-06 23:39:30.000000000 +0800
@@ -344,6 +344,8 @@ typedef enum {
 	PAGE_CLEAN,
 } pageout_t;
 
+#define LUMPY_PAGEOUT_PAGES	(512 * 1024 / PAGE_CACHE_SIZE)
+
 /*
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
@@ -409,6 +411,15 @@ static pageout_t pageout(struct page *pa
 	}
 
 	/*
+	 * only write_cache_pages() supports for_reclaim for now
+	 */
+	if (!mapping->a_ops->writepages) {
+		wbc.range_start = (page->index + 1) << PAGE_CACHE_SHIFT;
+		wbc.nr_to_write = LUMPY_PAGEOUT_PAGES - 1;
+		generic_writepages(mapping, &wbc);
+	}
+
+	/*
 	 * Wait on writeback if requested to. This happens when
 	 * direct reclaiming a large contiguous area and the
 	 * first attempt to free a range of pages fails.
--- linux.orig/mm/page-writeback.c	2009-10-06 23:39:29.000000000 +0800
+++ linux/mm/page-writeback.c	2009-10-06 23:39:30.000000000 +0800
@@ -805,6 +805,11 @@ int write_cache_pages(struct address_spa
 			break;
 		}
 
+		if (wbc->for_reclaim && done_index != page->index) {
+			done = 1;
+			break;
+		}
+
 		if (nr_to_write != wbc->nr_to_write &&
 		    done_index + WB_SEGMENT_DIST < page->index &&
 		    --wbc->nr_segments <= 0) {
@@ -846,6 +851,13 @@ continue_unlock:
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;
 
+			/*
+			 * active and unevictable pages will be checked at
+			 * rotate time
+			 */
+			if (wbc->for_reclaim)
+				SetPageReclaim(page);
+
 			ret = (*writepage)(page, wbc, data);
 			if (unlikely(ret)) {
 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
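
As a rough illustration of the clustering arithmetic in the pageout()
hunk, here is a minimal userspace sketch; it is not kernel code and not
part of the patch, and PAGE_CACHE_SIZE = 4096 plus the sample page index
are assumed values chosen only for illustration:

#include <stdio.h>

#define PAGE_CACHE_SIZE		4096UL
#define PAGE_CACHE_SHIFT	12
#define LUMPY_PAGEOUT_PAGES	(512 * 1024 / PAGE_CACHE_SIZE)	/* 128 pages at 4KB */

int main(void)
{
	unsigned long index = 1000;	/* page just written by ->writepage() */

	/* start the piggybacked writeout right after the page just paged out */
	unsigned long long range_start =
		(unsigned long long)(index + 1) << PAGE_CACHE_SHIFT;

	/* write at most the remainder of the 512KB cluster */
	unsigned long nr_to_write = LUMPY_PAGEOUT_PAGES - 1;

	printf("piggyback: start at byte %llu, up to %lu pages (%lu KB)\n",
	       range_start, nr_to_write,
	       nr_to_write * PAGE_CACHE_SIZE / 1024);
	return 0;
}

The write_cache_pages() check added above (wbc->for_reclaim &&
done_index != page->index) then ends this piggybacked walk at the first
non-consecutive dirty page, so only a contiguous run of dirty pages is
written out, as the changelog describes.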