+
 static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start, bool flush)
 {
 	pmd_t __pmd;
@@ -87,9 +118,9 @@ static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start, bool flush)
 		/* Make pte visible before pmd. See comment in pmd_install(). */
 		smp_wmb();
-		pmd_populate_kernel(&init_mm, pmd, pgtable);
+		vmemmap_update_pmd(start, pmd, pgtable);
 		if (flush)
-			flush_tlb_kernel_range(start, start + PMD_SIZE);
+			flush_tlb_vmemmap_range(start, start + PMD_SIZE);
 	} else {
 		pte_free_kernel(&init_mm, pgtable);
 	}
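The hunks above and below rely on four helpers that this excerpt does not show: vmemmap_update_pmd(), vmemmap_update_pte(), flush_tlb_vmemmap_range() and flush_tlb_vmemmap_all(). A minimal sketch of what their generic fallbacks would presumably look like, inferred from the call sites rather than quoted from the patch, is:

	/*
	 * Presumed generic fallbacks (assumption, not part of this excerpt):
	 * each helper is only provided if the architecture has not supplied
	 * its own definition, and simply calls the pre-existing primitive.
	 */
	#ifndef vmemmap_update_pmd
	static inline void vmemmap_update_pmd(unsigned long addr,
					      pmd_t *pmdp, pte_t *ptep)
	{
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	#endif

	#ifndef vmemmap_update_pte
	static inline void vmemmap_update_pte(unsigned long addr,
					      pte_t *ptep, pte_t pte)
	{
		set_pte_at(&init_mm, addr, ptep, pte);
	}
	#endif

	#ifndef flush_tlb_vmemmap_all
	#define flush_tlb_vmemmap_all	flush_tlb_all
	#endif

	#ifndef flush_tlb_vmemmap_range
	#define flush_tlb_vmemmap_range	flush_tlb_kernel_range
	#endif

With fallbacks along these lines, the substitutions in the remaining hunks are behaviour-preserving on any architecture that does not override the hooks.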
@@ -217,7 +248,7 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
 	} while (pgd++, addr = next, addr != end);
 	if (walk->remap_pte && !(walk->flags & VMEMMAP_REMAP_NO_TLB_FLUSH))
-		flush_tlb_kernel_range(start, end);
+		flush_tlb_vmemmap_range(start, end);
 	return 0;
 }
@@ -263,15 +294,15 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
 		/*
 		 * Makes sure that preceding stores to the page contents from
-		 * vmemmap_remap_free() become visible before the set_pte_at()
-		 * write.
+		 * vmemmap_remap_free() become visible before the
+		 * vmemmap_update_pte() write.
 		 */
 		smp_wmb();
 	}
 	entry = mk_pte(walk->reuse_page, pgprot);
 	list_add(&page->lru, walk->vmemmap_pages);
-	set_pte_at(&init_mm, addr, pte, entry);
+	vmemmap_update_pte(addr, pte, entry);
 }
 /*
@@ -310,10 +341,10 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr,
 	/*
 	 * Makes sure that preceding stores to the page contents become visible
-	 * before the set_pte_at() write.
+	 * before the vmemmap_update_pte() write.
 	 */
 	smp_wmb();
-	set_pte_at(&init_mm, addr, pte, mk_pte(page, pgprot));
+	vmemmap_update_pte(addr, pte, mk_pte(page, pgprot));
 }
 /**
@@ -576,7 +607,7 @@ long hugetlb_vmemmap_restore_folios(const struct hstate *h,
 	}
 	if (restored)
-		flush_tlb_all();
+		flush_tlb_vmemmap_all();
 	if (!ret)
 		ret = restored;
 	return ret;
@@ -744,7 +775,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
 			break;
 	}
-	flush_tlb_all();
+	flush_tlb_vmemmap_all();
 	list_for_each_entry(folio, folio_list, lru) {
 		int ret = __hugetlb_vmemmap_optimize_folio(h, folio,
@@ -760,7 +791,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
 		 * allowing more vmemmap remaps to occur.
 		 */
 		if (ret == -ENOMEM && !list_empty(&vmemmap_pages)) {
-			flush_tlb_all();
+			flush_tlb_vmemmap_all();
 			free_vmemmap_page_list(&vmemmap_pages);
 			INIT_LIST_HEAD(&vmemmap_pages);
 			__hugetlb_vmemmap_optimize_folio(h, folio,
@@ -769,7 +800,7 @@ void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_l
 		}
 	}
-	flush_tlb_all();
+	flush_tlb_vmemmap_all();
 	free_vmemmap_page_list(&vmemmap_pages);
 }
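The point of funnelling every vmemmap page-table update and TLB flush through these hooks is to give an architecture a single place to substitute its own update sequence. Purely as an illustration (none of the names or the exact sequence below appear in this excerpt), an arch-side override might look like:

	/* Illustrative arch override only; an assumption, not part of this patch. */
	static inline void vmemmap_update_pmd(unsigned long addr,
					      pmd_t *pmdp, pte_t *ptep)
	{
		pmd_clear(pmdp);				/* break: invalidate the old leaf entry */
		flush_tlb_kernel_range(addr, addr + PMD_SIZE);	/* flush the stale translation */
		pmd_populate_kernel(&init_mm, pmdp, ptep);	/* make: install the new page table */
	}
	#define vmemmap_update_pmd vmemmap_update_pmd		/* suppress the generic fallback */

Defining the macro with the same name as the function follows the usual kernel pattern for letting the generic #ifndef fallback detect that an architecture has provided its own implementation.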