From: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx> The base scheme is the same as for anonymous pages, but we walk by mapping->i_mmap rather than anon_vma->rb_root. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx> --- mm/huge_memory.c | 68 +++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 57 insertions(+), 11 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index ed31e90..73974e8 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1655,23 +1655,23 @@ static void __split_huge_page_refcount(struct page *page, */ page_tail->_mapcount = page->_mapcount; - BUG_ON(page_tail->mapping); page_tail->mapping = page->mapping; page_tail->index = page->index + i; page_nid_xchg_last(page_tail, page_nid_last(page)); - BUG_ON(!PageAnon(page_tail)); BUG_ON(!PageUptodate(page_tail)); BUG_ON(!PageDirty(page_tail)); - BUG_ON(!PageSwapBacked(page_tail)); lru_add_page_tail(page, page_tail, lruvec, list); } atomic_sub(tail_count, &page->_count); BUG_ON(atomic_read(&page->_count) <= 0); - __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1); + if (PageAnon(page)) + __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1); + else + __mod_zone_page_state(zone, NR_FILE_TRANSPARENT_HUGEPAGES, -1); ClearPageCompound(page); compound_unlock(page); @@ -1771,7 +1771,7 @@ static int __split_huge_page_map(struct page *page, } /* must be called with anon_vma->root->rwsem held */ -static void __split_huge_page(struct page *page, +static void __split_anon_huge_page(struct page *page, struct anon_vma *anon_vma, struct list_head *list) { @@ -1795,7 +1795,7 @@ static void __split_huge_page(struct page *page, * and establishes a child pmd before * __split_huge_page_splitting() freezes the parent pmd (so if * we fail to prevent copy_huge_pmd() from running until the - * whole __split_huge_page() is complete), we will still see + * whole __split_anon_huge_page() is complete), we will still see * the newly established pmd of 
the child later during the * walk, to be able to set it as pmd_trans_splitting too. */ @@ -1826,14 +1826,11 @@ static void __split_huge_page(struct page *page, * from the hugepage. * Return 0 if the hugepage is split successfully otherwise return 1. */ -int split_huge_page_to_list(struct page *page, struct list_head *list) +static int split_anon_huge_page(struct page *page, struct list_head *list) { struct anon_vma *anon_vma; int ret = 1; - BUG_ON(is_huge_zero_page(page)); - BUG_ON(!PageAnon(page)); - /* * The caller does not necessarily hold an mmap_sem that would prevent * the anon_vma disappearing so we first we take a reference to it @@ -1851,7 +1848,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) goto out_unlock; BUG_ON(!PageSwapBacked(page)); - __split_huge_page(page, anon_vma, list); + __split_anon_huge_page(page, anon_vma, list); count_vm_event(THP_SPLIT); BUG_ON(PageCompound(page)); @@ -1862,6 +1859,55 @@ out: return ret; } +static int split_file_huge_page(struct page *page, struct list_head *list) +{ + struct address_space *mapping = page->mapping; + pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + struct vm_area_struct *vma; + int mapcount, mapcount2; + + BUG_ON(!PageHead(page)); + BUG_ON(PageTail(page)); + + mutex_lock(&mapping->i_mmap_mutex); + mapcount = 0; + vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { + unsigned long addr = vma_address(page, vma); + mapcount += __split_huge_page_splitting(page, vma, addr); + } + + if (mapcount != page_mapcount(page)) + printk(KERN_ERR "mapcount %d page_mapcount %d\n", + mapcount, page_mapcount(page)); + BUG_ON(mapcount != page_mapcount(page)); + + __split_huge_page_refcount(page, list); + + mapcount2 = 0; + vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { + unsigned long addr = vma_address(page, vma); + mapcount2 += __split_huge_page_map(page, vma, addr); + } + + if (mapcount != mapcount2) + printk(KERN_ERR "mapcount %d mapcount2 %d 
page_mapcount %d\n", + mapcount, mapcount2, page_mapcount(page)); + BUG_ON(mapcount != mapcount2); + count_vm_event(THP_SPLIT); + mutex_unlock(&mapping->i_mmap_mutex); + return 0; +} + +int split_huge_page_to_list(struct page *page, struct list_head *list) +{ + BUG_ON(is_huge_zero_page(page)); + + if (PageAnon(page)) + return split_anon_huge_page(page, list); + else + return split_file_huge_page(page, list); +} + #define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE) int hugepage_madvise(struct vm_area_struct *vma, -- 1.7.10.4 -- To unsubscribe from this list: send the line "unsubscribe linux-fsdevel" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html