Export memory management functions that are useful to memory managers
so that they can be used by memory management filesystems implemented
in kernel modules.

Signed-off-by: Bijan Tabatabai <btabatabai@xxxxxxxx>
---
 arch/x86/include/asm/tlbflush.h | 2 --
 arch/x86/mm/tlb.c               | 1 +
 mm/filemap.c                    | 2 ++
 mm/memory.c                     | 1 +
 mm/mmap.c                       | 2 ++
 mm/pgtable-generic.c            | 1 +
 mm/rmap.c                       | 2 ++
 7 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 25726893c6f4..9877176d396f 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -57,7 +57,6 @@ static inline void cr4_clear_bits(unsigned long mask)
 	local_irq_restore(flags);
 }
 
-#ifndef MODULE
 /*
  * 6 because 6 should be plenty and struct tlb_state will fit in two cache
  * lines.
@@ -417,7 +416,6 @@
 static inline void set_tlbstate_lam_mode(struct mm_struct *mm) { }
 #endif
 
-#endif /* !MODULE */
 
 static inline void __native_tlb_flush_global(unsigned long cr4)
 {
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 44ac64f3a047..f054cee7bc7c 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -1036,6 +1036,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 	put_cpu();
 	mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
 }
+EXPORT_SYMBOL_GPL(flush_tlb_mm_range);
 
 static void do_flush_tlb_all(void *info)
diff --git a/mm/filemap.c b/mm/filemap.c
index 657bcd887fdb..8532ddd37e7f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -269,6 +269,7 @@ void filemap_remove_folio(struct folio *folio)
 	filemap_free_folio(mapping, folio);
 }
+EXPORT_SYMBOL_GPL(filemap_remove_folio);
 
 /*
  * page_cache_delete_batch - delete several folios from page cache
@@ -955,6 +956,7 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 	return xas_error(&xas);
 }
 ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
+EXPORT_SYMBOL_GPL(__filemap_add_folio);
 
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 				pgoff_t index, gfp_t gfp)
diff --git a/mm/memory.c b/mm/memory.c
index fa2fe3ee0867..23e74a0397fa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -448,6 +448,7 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd)
 		pte_free(mm, new);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__pte_alloc);
 
 int __pte_alloc_kernel(pmd_t *pmd)
 {
diff --git a/mm/mmap.c b/mm/mmap.c
index d684d8bd218b..1090ef982929 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1780,6 +1780,7 @@ generic_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.high_limit = mmap_end;
 	return vm_unmapped_area(&info);
 }
+EXPORT_SYMBOL_GPL(generic_get_unmapped_area);
 
 #ifndef HAVE_ARCH_UNMAPPED_AREA
 unsigned long
@@ -1844,6 +1845,7 @@ generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 
 	return addr;
 }
+EXPORT_SYMBOL_GPL(generic_get_unmapped_area_topdown);
 
 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 unsigned long
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index a78a4adf711a..1a3b4a86b005 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -304,6 +304,7 @@ pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
 	rcu_read_unlock();
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(__pte_offset_map);
 
 pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
 			     unsigned long addr, spinlock_t **ptlp)
diff --git a/mm/rmap.c b/mm/rmap.c
index e8fc5ecb59b2..fdade910cc95 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1468,6 +1468,7 @@ void folio_add_file_rmap_ptes(struct folio *folio, struct page *page,
 {
 	__folio_add_file_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
 }
+EXPORT_SYMBOL_GPL(folio_add_file_rmap_ptes);
 
 /**
  * folio_add_file_rmap_pmd - add a PMD mapping to a page range of a folio
@@ -1594,6 +1595,7 @@ void folio_remove_rmap_ptes(struct folio *folio, struct page *page,
 {
 	__folio_remove_rmap(folio, page, nr_pages, vma, RMAP_LEVEL_PTE);
 }
+EXPORT_SYMBOL_GPL(folio_remove_rmap_ptes);
 
 /**
  * folio_remove_rmap_pmd - remove a PMD mapping from a page range of a folio
-- 
2.34.1
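
For illustration only, not part of the patch: a minimal sketch of a GPL
module that links against flush_tlb_mm_range(), one of the symbols exported
above. The module name and the choice to flush the first page of
current->mm are invented for the example; the call signature is the x86
definition touched by this patch. Note that before this patch such a module
could not even include <asm/tlbflush.h> usefully, since the relevant
declarations were hidden behind the #ifndef MODULE guard removed here.

// SPDX-License-Identifier: GPL-2.0
/* tlb_export_demo.c - hypothetical module exercising an exported symbol */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/tlbflush.h>

static int __init tlb_export_demo_init(void)
{
	struct mm_struct *mm = current->mm;

	/* Kernel threads have no user address space to flush. */
	if (!mm)
		return -EINVAL;

	/*
	 * Flush the TLB entries covering the first page of the calling
	 * task's address space, at PAGE_SIZE stride, with no freed page
	 * tables. Without the EXPORT_SYMBOL_GPL added by this patch,
	 * this call would fail to resolve at module load time.
	 */
	flush_tlb_mm_range(mm, 0, PAGE_SIZE, PAGE_SHIFT, false);
	return 0;
}

static void __exit tlb_export_demo_exit(void)
{
}

module_init(tlb_export_demo_init);
module_exit(tlb_export_demo_exit);
MODULE_LICENSE("GPL");

The other exports (__filemap_add_folio(), __pte_alloc(),
folio_add_file_rmap_ptes(), and so on) are consumed the same way:
EXPORT_SYMBOL_GPL only makes the existing symbols resolvable from GPL
modules and does not change their behavior for in-tree callers.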