-static inline void __page_dup_rmap(struct page *page, bool compound)
+static inline int __folio_try_dup_anon_rmap(struct folio *folio,
+ struct page *page, unsigned int nr_pages,
+ struct vm_area_struct *src_vma, enum rmap_mode mode)
{
- VM_WARN_ON(folio_test_hugetlb(page_folio(page)));
+ int i;
- if (compound) {
- struct folio *folio = (struct folio *)page;
+ VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
- VM_BUG_ON_PAGE(compound && !PageHead(page), page);
- atomic_inc(&folio->_entire_mapcount);
- } else {
- atomic_inc(&page->_mapcount);
+ /*
+ * No need to check+clear for already shared PTEs/PMDs of the folio.
+ * This includes PTE mappings of (order-0) KSM folios.
+ */
+ if (likely(mode == RMAP_MODE_PTE)) {
+ for (i = 0; i < nr_pages; i++) {
+ if (PageAnonExclusive(page + i))
+ goto clear;
+ }
+ } else if (mode == RMAP_MODE_PMD) {
+ if (PageAnonExclusive(page))
+ goto clear;
}
+ goto dup;
+
+clear:
+ /*
+ * If this folio may have been pinned by the parent process,
+ * don't allow to duplicate the mappings but instead require to e.g.,
+ * copy the subpage immediately for the child so that we'll always
+ * guarantee the pinned folio won't be randomly replaced in the
+ * future on write faults.
+ */
+ if (likely(!folio_is_device_private(folio) &&
+ unlikely(folio_needs_cow_for_dma(src_vma, folio))))
+ return -EBUSY;
+
+ if (likely(mode == RMAP_MODE_PTE)) {
+ for (i = 0; i < nr_pages; i++)
+ ClearPageAnonExclusive(page + i);
+ } else if (mode == RMAP_MODE_PMD) {
+ ClearPageAnonExclusive(page);
+ }
+
+dup:
+ __folio_dup_rmap(folio, page, nr_pages, mode);
+ return 0;
Playing with this, I think it can be implemented more efficiently by
looping over the pages only once (instead of one scan to test and a second
to clear) and optimizing for the common case that PageAnonExclusive (PAE)
is set. I will have to do some more measurements.
--
Cheers,
David / dhildenb