mmu_notifier_invalidate_range_start(&range);
pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
+
/*
* This removes any huge TLB entry from the CPU so we won't allow
* huge and small TLB entries for the same virtual address to
@@ -1216,10 +1220,10 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
mmu_notifier_invalidate_range_end(&range);
tlb_remove_table_sync_one();
- pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl);
+ pte = pte_offset_map_lock(mm, &_pmd, _address, &pte_ptl);
if (pte) {
- result = __collapse_huge_page_isolate(vma, address, pte, cc,
- &compound_pagelist, HPAGE_PMD_ORDER);
+ result = __collapse_huge_page_isolate(vma, _address, pte, cc,
+ &compound_pagelist, order);
spin_unlock(pte_ptl);
} else {
result = SCAN_PMD_NULL;
@@ -1248,8 +1252,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
anon_vma_unlock_write(vma->anon_vma);
result = __collapse_huge_page_copy(pte, folio, pmd, _pmd,
- vma, address, pte_ptl,
- &compound_pagelist, HPAGE_PMD_ORDER);
+ vma, _address, pte_ptl,
+ &compound_pagelist, order);
pte_unmap(pte);
if (unlikely(result != SCAN_SUCCEED))
goto out_up_write;
@@ -1260,20 +1264,37 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
* write.
*/
__folio_mark_uptodate(folio);
- pgtable = pmd_pgtable(_pmd);
-
- _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
- _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-
- spin_lock(pmd_ptl);
- BUG_ON(!pmd_none(*pmd));
- folio_add_new_anon_rmap(folio, vma, address, RMAP_EXCLUSIVE);
- folio_add_lru_vma(folio, vma);
- pgtable_trans_huge_deposit(mm, pmd, pgtable);
- set_pmd_at(mm, address, pmd, _pmd);
- update_mmu_cache_pmd(vma, address, pmd);
- deferred_split_folio(folio, false);
- spin_unlock(pmd_ptl);
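+ /*
+ * For a full PMD-order collapse, install a huge PMD. For a smaller
+ * mTHP order, map the folio with PTEs and reinstall the page table.
+ */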
+ if (order == HPAGE_PMD_ORDER) {
+ pgtable = pmd_pgtable(_pmd);
+ _pmd = mk_huge_pmd(&folio->page, vma->vm_page_prot);
+ _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
+
+ spin_lock(pmd_ptl);
+ BUG_ON(!pmd_none(*pmd));
+ folio_add_new_anon_rmap(folio, vma, _address, RMAP_EXCLUSIVE);
+ folio_add_lru_vma(folio, vma);
+ pgtable_trans_huge_deposit(mm, pmd, pgtable);
+ set_pmd_at(mm, address, pmd, _pmd);
+ update_mmu_cache_pmd(vma, address, pmd);
+ deferred_split_folio(folio, false);
+ spin_unlock(pmd_ptl);
+ } else { /* mTHP */
+ mthp_pte = mk_pte(&folio->page, vma->vm_page_prot);
+ mthp_pte = maybe_mkwrite(pte_mkdirty(mthp_pte), vma);
+
+ spin_lock(pmd_ptl);
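+ /* one folio reference for each of the 1 << order PTE mappings */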
+ folio_ref_add(folio, (1 << order) - 1);
+ folio_add_new_anon_rmap(folio, vma, _address, RMAP_EXCLUSIVE);
+ folio_add_lru_vma(folio, vma);
+ spin_lock(pte_ptl);
+ set_ptes(vma->vm_mm, _address, pte, mthp_pte, (1 << order));
+ update_mmu_cache_range(NULL, vma, _address, pte, (1 << order));
+ spin_unlock(pte_ptl);
+ smp_wmb(); /* make pte visible before pmd */
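+ /* reinstall the page table now that it maps the new folio */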
+ pmd_populate(mm, pmd, pmd_pgtable(_pmd));
+ deferred_split_folio(folio, false);
+ spin_unlock(pmd_ptl);
+ }
folio = NULL;
@@ -1353,21 +1374,27 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
{
pmd_t *pmd;
pte_t *pte, *_pte;
+ int i;
int result = SCAN_FAIL, referenced = 0;
int none_or_zero = 0, shared = 0;
struct page *page = NULL;
struct folio *folio = NULL;
unsigned long _address;
+ unsigned long enabled_orders;
spinlock_t *ptl;
int node = NUMA_NO_NODE, unmapped = 0;
bool writable = false;
-
+ int chunk_none_count = 0;
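+ /* max_ptes_none scaled from PMD granularity to one MIN_MTHP_ORDER chunk */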
+ int scaled_none = khugepaged_max_ptes_none >> (HPAGE_PMD_ORDER - MIN_MTHP_ORDER);
+ unsigned long tva_flags = cc->is_khugepaged ? TVA_ENFORCE_SYSFS : 0;
+
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
result = find_pmd_or_thp_or_none(mm, address, &pmd);
if (result != SCAN_SUCCEED)
goto out;
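+ /* clear the bitmaps that track which MIN_MTHP_NR chunks are collapsible */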
+ bitmap_zero(cc->mthp_bitmap, MAX_MTHP_BITMAP_SIZE);
+ bitmap_zero(cc->mthp_bitmap_temp, MAX_MTHP_BITMAP_SIZE);
memset(cc->node_load, 0, sizeof(cc->node_load));
nodes_clear(cc->alloc_nmask);
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -1376,8 +1403,12 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
goto out;
}
- for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
- _pte++, _address += PAGE_SIZE) {
+ for (i = 0; i < HPAGE_PMD_NR; i++) {
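+ /* reset the empty-PTE count at each MIN_MTHP_NR chunk boundary */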
+ if (i % MIN_MTHP_NR == 0)
+ chunk_none_count = 0;
+
+ _pte = pte + i;
+ _address = address + i * PAGE_SIZE;
pte_t pteval = ptep_get(_pte);
if (is_swap_pte(pteval)) {
++unmapped;
@@ -1400,16 +1431,14 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
}
}
if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
+ ++chunk_none_count;
++none_or_zero;
- if (!userfaultfd_armed(vma) &&
- (!cc->is_khugepaged ||
- none_or_zero <= khugepaged_max_ptes_none)) {
- continue;
- } else {
+ if (userfaultfd_armed(vma)) {
result = SCAN_EXCEED_NONE_PTE;
count_vm_event(THP_SCAN_EXCEED_NONE_PTE);
goto out_unmap;
}
+ continue;
}
if (pte_uffd_wp(pteval)) {
/*
@@ -1500,7 +1529,16 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
folio_test_referenced(folio) || mmu_notifier_test_young(vma->vm_mm,
address)))
referenced++;
+
+ /*
+ * We scan in MIN_MTHP_NR page chunks. If a chunk has fewer than
+ * scaled_none empty PTEs, mark it in the bitmap as a candidate
+ * for mTHP collapse.
+ */
+ if (chunk_none_count < scaled_none &&
+ (i + 1) % MIN_MTHP_NR == 0)
+ bitmap_set(cc->mthp_bitmap, i / MIN_MTHP_NR, 1);
}
+
if (!writable) {
result = SCAN_PAGE_RO;
} else if (cc->is_khugepaged &&
@@ -1513,10 +1551,14 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
out_unmap:
pte_unmap_unlock(pte, ptl);
if (result == SCAN_SUCCEED) {
- result = collapse_huge_page(mm, address, referenced,
- unmapped, cc, mmap_locked, HPAGE_PMD_ORDER, 0);
- /* collapse_huge_page will return with the mmap_lock released */
- *mmap_locked = false;
+ enabled_orders = thp_vma_allowable_orders(vma, vma->vm_flags,
+ tva_flags, THP_ORDERS_ALL_ANON);
+ result = khugepaged_scan_bitmap(mm, address, referenced, unmapped, cc,
+ mmap_locked, enabled_orders);
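+ /* a positive return from khugepaged_scan_bitmap() indicates at least one successful collapse */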
+ if (result > 0)
+ result = SCAN_SUCCEED;
+ else
+ result = SCAN_FAIL;
}
out:
trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
@@ -2476,11 +2518,13 @@ static int khugepaged_collapse_single_pmd(unsigned long addr, struct mm_struct *
fput(file);
if (result == SCAN_PTE_MAPPED_HUGEPAGE) {
mmap_read_lock(mm);
+ *mmap_locked = true;
if (khugepaged_test_exit_or_disable(mm))
goto end;
result = collapse_pte_mapped_thp(mm, addr,
!cc->is_khugepaged);
mmap_read_unlock(mm);
+ *mmap_locked = false;
}
} else {
result = khugepaged_scan_pmd(mm, vma, addr,