The patch titled
     Subject: mm: remove __hugepage_set_anon_rmap()
has been removed from the -mm tree.  Its filename was
     mm-remove-__hugepage_set_anon_rmap.patch

This patch was dropped because it was merged into mainline or a subsystem tree

------------------------------------------------------
From: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx>
Subject: mm: remove __hugepage_set_anon_rmap()

This function has been identical to __page_set_anon_rmap() since the time
it was introduced (8 years ago).  The patch removes the function and makes
its users call __page_set_anon_rmap() instead.

Link: http://lkml.kernel.org/r/154504875359.30235.6237926369392564851.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@xxxxxxxxxxxxx>
Acked-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Reviewed-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Reviewed-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Jerome Glisse <jglisse@xxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

--- a/mm/rmap.c~mm-remove-__hugepage_set_anon_rmap
+++ a/mm/rmap.c
@@ -1019,7 +1019,7 @@ void page_move_anon_rmap(struct page *pa
 
 /**
  * __page_set_anon_rmap - set up new anonymous rmap
- * @page:	Page to add to rmap
+ * @page:	Page or Hugepage to add to rmap
  * @vma:	VM area to add page to.
  * @address:	User virtual address of the mapping
  * @exclusive:	the page is exclusively owned by the current process
@@ -1916,27 +1916,10 @@ void rmap_walk_locked(struct page *page,
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
- * The following three functions are for anonymous (private mapped) hugepages.
+ * The following two functions are for anonymous (private mapped) hugepages.
  * Unlike common anonymous pages, anonymous hugepages have no accounting code
 * and no lru code, because we handle hugepages differently from common pages.
  */
-static void __hugepage_set_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address, int exclusive)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-
-	BUG_ON(!anon_vma);
-
-	if (PageAnon(page))
-		return;
-	if (!exclusive)
-		anon_vma = anon_vma->root;
-
-	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-	page->mapping = (struct address_space *) anon_vma;
-	page->index = linear_page_index(vma, address);
-}
-
 void hugepage_add_anon_rmap(struct page *page,
 			    struct vm_area_struct *vma, unsigned long address)
 {
@@ -1948,7 +1931,7 @@ void hugepage_add_anon_rmap(struct page
 	/* address might be in next vma when migration races vma_adjust */
 	first = atomic_inc_and_test(compound_mapcount_ptr(page));
 	if (first)
-		__hugepage_set_anon_rmap(page, vma, address, 0);
+		__page_set_anon_rmap(page, vma, address, 0);
 }
 
 void hugepage_add_new_anon_rmap(struct page *page,
@@ -1956,6 +1939,6 @@ void hugepage_add_new_anon_rmap(struct p
 {
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(compound_mapcount_ptr(page), 0);
-	__hugepage_set_anon_rmap(page, vma, address, 1);
+	__page_set_anon_rmap(page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */
_

Patches currently in -mm which might be from ktkhai@xxxxxxxxxxxxx are

mm-reuse-only-pte-mapped-ksm-page-in-do_wp_page.patch
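
For comparison, the __page_set_anon_rmap() that both hugetlb helpers now
call reads roughly as follows in the mm/rmap.c of this era.  This is a
sketch reconstructed from the removed __hugepage_set_anon_rmap() body above
(which the changelog says is identical) and the kerneldoc touched by the
first hunk; it is not quoted from this email, and the inline comments are
added here for explanation:

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page:	Page or Hugepage to add to rmap
 * @vma:	VM area to add page to.
 * @address:	User virtual address of the mapping
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/* Already anonymous?  The rmap is set up; nothing to do. */
	if (PageAnon(page))
		return;

	/*
	 * If the page is not exclusively mapped into this vma, fall
	 * back to the root of the anon_vma tree so every vma that may
	 * map the page can find it through page->mapping.
	 */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/*
	 * Encode the anon_vma pointer into page->mapping, tagging the
	 * low bit (PAGE_MAPPING_ANON) so PageAnon() can tell it apart
	 * from a real address_space pointer.
	 */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

Since the removed hugetlb variant differed only in name, redirecting
hugepage_add_anon_rmap() and hugepage_add_new_anon_rmap() to this function
is a pure deduplication with no behavioral change.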