On Sat, Aug 31, 2024 at 9:59 PM David Hildenbrand <david@xxxxxxxxxx> wrote:
>
> > +       idx = folio_page_idx(folio, vmf->page);
> > +       folio_start = address - idx * PAGE_SIZE;
> > +       folio_end = folio_start + nr * PAGE_SIZE;
> > +
> > +       if (unlikely(folio_start < max(address & PMD_MASK, vma->vm_start)))
> > +               return false;
> > +       if (unlikely(folio_end > pmd_addr_end(address, vma->vm_end)))
> > +               return false;
> > +       folio_ptep = vmf->pte - idx;
> > +       folio_pte = ptep_get(folio_ptep);
> > +       if (!pte_present(folio_pte) || pte_pfn(folio_pte) != folio_pfn(folio))
> > +               return false;
> > +       if (folio_pte_batch(folio, folio_start, folio_ptep, folio_pte, nr, 0,
> > +                           NULL, NULL, NULL) != nr)
> > +               return false;
> > +       if (folio_mapcount(folio) != nr)
> > +               return false;
>
> BTW, you're not checking against the refcount (and it's all a bit racy
> on concurrent unmapping!). So you're re-introducing the vmsplice
> child->parent attack.

I don't quite understand this; do you mean the below is not enough?

-	if (folio_test_ksm(folio) || folio_ref_count(folio) > 3)
+	if (folio_test_ksm(folio) || folio_ref_count(folio) > 2 + nr)
 		return false;
 	if (!folio_test_lru(folio))
 		/*
@@ -3591,13 +3627,13 @@ static bool wp_can_reuse_anon_folio(struct folio *folio,
 		 * remote LRU caches or references to LRU folios.
 		 */
 		lru_add_drain();
-	if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio))
+	if (folio_ref_count(folio) > nr + folio_test_swapcache(folio))
 		return false;
 	if (!folio_trylock(folio))
 		return false;
 	if (folio_test_swapcache(folio))
 		folio_free_swap(folio);
-	if (folio_test_ksm(folio) || folio_ref_count(folio) != 1) {
+	if (folio_test_ksm(folio) || folio_ref_count(folio) != nr) {
 		folio_unlock(folio);
 		return false;

>
> --
> Cheers,
>
> David / dhildenb
>
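
To spell out the refcount accounting I have in mind (a rough sketch only: the
helper below and its name are made up for illustration and simply condense
the checks from the diff above into one place), and if I read the original
"> 3" correctly: each of the nr PTE mappings holds one folio reference, and
the swapcache plus a pending per-CPU LRU cache entry can each hold one more,
which is where the early "2 + nr" comes from. Once the LRU caches are drained
and the folio is locked, reuse should only happen when the refcount has
dropped to exactly nr:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

/*
 * Illustrative sketch only (name made up): condenses the refcount
 * checks from the diff above, assuming "nr" PTEs of this anon folio
 * are mapped in the faulting process and nothing else is expected to
 * hold a long-term reference.
 */
static bool can_reuse_anon_folio_sketch(struct folio *folio, int nr)
{
	/*
	 * Cheap early check before taking the folio lock: nr mapping
	 * references, plus possibly one from the swapcache and one
	 * from a per-CPU LRU cache.
	 */
	if (folio_test_ksm(folio) || folio_ref_count(folio) > 2 + nr)
		return false;
	if (!folio_test_lru(folio))
		/* Drain per-CPU LRU caches that may still pin the folio. */
		lru_add_drain();
	if (folio_ref_count(folio) > nr + folio_test_swapcache(folio))
		return false;
	if (!folio_trylock(folio))
		return false;
	if (folio_test_swapcache(folio))
		folio_free_swap(folio);
	/* Under the folio lock: only our nr mappings may remain. */
	if (folio_test_ksm(folio) || folio_ref_count(folio) != nr) {
		folio_unlock(folio);
		return false;
	}
	folio_unlock(folio);
	return true;
}

Whether this can still race with concurrent unmapping in the way you
describe is exactly the part I'd like to understand.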