Let's prepare for passing RMAP_EXCLUSIVE, just as we already do for
page_add_anon_rmap(). RMAP_COMPOUND is implicit for hugetlb pages and
is ignored.

Signed-off-by: David Hildenbrand <david@xxxxxxxxxx>
---
 include/linux/rmap.h | 2 +-
 mm/migrate.c         | 2 +-
 mm/rmap.c            | 8 +++++---
 3 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index f230e86b4587..593a4566420f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -174,7 +174,7 @@ void page_add_file_rmap(struct page *, bool);
 void page_remove_rmap(struct page *, bool);
 
 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
-		unsigned long);
+		unsigned long, int);
 void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
 		unsigned long);
 
diff --git a/mm/migrate.c b/mm/migrate.c
index d4d72a15224c..709cb11d5b81 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -238,7 +238,7 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
 			pte = pte_mkhuge(pte);
 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
 			if (PageAnon(new))
-				hugepage_add_anon_rmap(new, vma, pvmw.address);
+				hugepage_add_anon_rmap(new, vma, pvmw.address, 0);
 			else
 				page_dup_file_rmap(new, true);
 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
diff --git a/mm/rmap.c b/mm/rmap.c
index 902ebf99d147..bafdb7f70cec 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2409,9 +2409,11 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
  * The following two functions are for anonymous (private mapped) hugepages.
  * Unlike common anonymous pages, anonymous hugepages have no accounting code
  * and no lru code, because we handle hugepages differently from common pages.
+ *
+ * RMAP_COMPOUND is ignored.
  */
-void hugepage_add_anon_rmap(struct page *page,
-			    struct vm_area_struct *vma, unsigned long address)
+void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
+		unsigned long address, int flags)
 {
 	struct anon_vma *anon_vma = vma->anon_vma;
 	int first;
@@ -2421,7 +2423,7 @@ void hugepage_add_anon_rmap(struct page *page,
 	/* address might be in next vma when migration races vma_adjust */
 	first = atomic_inc_and_test(compound_mapcount_ptr(page));
 	if (first)
-		__page_set_anon_rmap(page, vma, address, 0);
+		__page_set_anon_rmap(page, vma, address, flags & RMAP_EXCLUSIVE);
 }
 
 void hugepage_add_new_anon_rmap(struct page *page,
-- 
2.35.1
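
As a usage sketch (not part of this patch, and only an assumption about
what a follow-up caller might look like): once a caller knows the anon
page was mapped exclusively, it can pass RMAP_EXCLUSIVE instead of 0.
For the migration path, one illustrative option is to derive the flag
from the migration entry, since a writable migration entry can only
exist for an exclusively mapped anon page:

	/*
	 * Illustrative sketch only: derive the rmap flags from the
	 * migration entry instead of hard-coding 0. A writable
	 * migration entry implies the anon page was mapped
	 * exclusively.
	 */
	int rmap_flags = is_writable_migration_entry(entry) ?
			 RMAP_EXCLUSIVE : 0;

	if (PageAnon(new))
		hugepage_add_anon_rmap(new, vma, pvmw.address, rmap_flags);
	else
		page_dup_file_rmap(new, true);

Until such a caller exists, remove_migration_pte() keeps passing 0, so
behavior is unchanged by this patch.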