On Wed, Aug 07, 2024 at 02:46:47PM +0100, Usama Arif wrote:
> @@ -177,13 +177,56 @@ void putback_movable_pages(struct list_head *l)
>  	}
>  }
>  
> +static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
> +					  struct folio *folio,
> +					  unsigned long idx)
> +{
> +	struct page *page = folio_page(folio, idx);
> +	bool contains_data;
> +	pte_t newpte;
> +	void *addr;
> +
> +	VM_BUG_ON_PAGE(PageCompound(page), page);
> +	VM_BUG_ON_PAGE(!PageAnon(page), page);
> +	VM_BUG_ON_PAGE(!PageLocked(page), page);
> +	VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
> +
> +	if (PageMlocked(page) || (pvmw->vma->vm_flags & VM_LOCKED))
> +		return false;
> +
> +	/*
> +	 * The pmd entry mapping the old thp was flushed and the pte mapping
> +	 * this subpage has been non present. If the subpage is only zero-filled
> +	 * then map it to the shared zeropage.
> +	 */
> +	addr = kmap_local_page(page);
> +	contains_data = memchr_inv(addr, 0, PAGE_SIZE);
> +	kunmap_local(addr);
> +
> +	if (contains_data || mm_forbids_zeropage(pvmw->vma->vm_mm))
> +		return false;
> +
> +	newpte = pte_mkspecial(pfn_pte(page_to_pfn(ZERO_PAGE(pvmw->address)),
> +				       pvmw->vma->vm_page_prot));

Why not use my_zero_pfn() here? On many configurations this just
returns zero_pfn and avoids the indirection through mem_map. (Sketch
at the end of this mail.)

> @@ -904,7 +958,7 @@ static int writeout(struct address_space *mapping, struct folio *folio)
>  	 * At this point we know that the migration attempt cannot
>  	 * be successful.
>  	 */
> -	remove_migration_ptes(folio, folio, false);
> +	remove_migration_ptes(folio, folio, false, false);

Bool parameters are not great for callsite readability. How about a
flags parameter and using names?

enum rmp_flags {
	RMP_LOCKED	= 1 << 0,
	RMP_ZEROPAGES	= 1 << 1,
};
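Untested sketch of what that would do to the callsites. The
flags-taking prototype and the combined-flags caller are my guesses at
the follow-up, not part of the posted patch:

	/* remove_migration_ptes() takes int flags instead of bools */
	void remove_migration_ptes(struct folio *src, struct folio *dst,
				   int flags);

	/* writeout(): plain rmap walk, no zeropage rewrite */
	remove_migration_ptes(folio, folio, 0);

	/*
	 * A caller that already holds the folio lock and wants
	 * zero-filled subpages mapped to the shared zeropage.
	 */
	remove_migration_ptes(folio, folio, RMP_LOCKED | RMP_ZEROPAGES);

The callsite then documents itself, and adding another behavior later
doesn't mean churning every existing caller with one more bool.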
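And the my_zero_pfn() change I mean, untested (my_zero_pfn() is
declared in include/linux/pgtable.h):

-	newpte = pte_mkspecial(pfn_pte(page_to_pfn(ZERO_PAGE(pvmw->address)),
-				       pvmw->vma->vm_page_prot));
+	/*
+	 * On most configurations my_zero_pfn() just returns the cached
+	 * zero_pfn, skipping ZERO_PAGE()'s round trip through mem_map;
+	 * arches with colored zero pages (e.g. s390, MIPS) still get
+	 * the page matching the address.
+	 */
+	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
+				       pvmw->vma->vm_page_prot));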