From: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx> As with add_to_page_cache_locked() we handle HPAGE_CACHE_NR pages a time. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx> --- mm/filemap.c | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 619e6cb..b75bdf5 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -115,6 +115,7 @@ void __delete_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; + int i, nr; trace_mm_filemap_delete_from_page_cache(page); /* @@ -127,13 +128,21 @@ void __delete_from_page_cache(struct page *page) else cleancache_invalidate_page(mapping, page); - radix_tree_delete(&mapping->page_tree, page->index); + nr = hpagecache_nr_pages(page); + for (i = 0; i < nr; i++) { + page[i].mapping = NULL; + radix_tree_delete(&mapping->page_tree, page->index + i); + } + /* thp */ + if (nr > 1) + __dec_zone_page_state(page, NR_FILE_TRANSPARENT_HUGEPAGES); + page->mapping = NULL; /* Leave page->index set: truncation lookup relies upon it */ - mapping->nrpages--; - __dec_zone_page_state(page, NR_FILE_PAGES); + mapping->nrpages -= nr; + __mod_zone_page_state(page_zone(page), NR_FILE_PAGES, -nr); if (PageSwapBacked(page)) - __dec_zone_page_state(page, NR_SHMEM); + __mod_zone_page_state(page_zone(page), NR_SHMEM, -nr); BUG_ON(page_mapped(page)); /* @@ -144,8 +153,8 @@ void __delete_from_page_cache(struct page *page) * having removed the page entirely. */ if (PageDirty(page) && mapping_cap_account_dirty(mapping)) { - dec_zone_page_state(page, NR_FILE_DIRTY); - dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); + mod_zone_page_state(page_zone(page), NR_FILE_DIRTY, -nr); + add_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE, -nr); } } -- 1.8.3.2 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>