Both madvise(MADV_COLLAPSE) and khugepaged can collapse a contiguous
THP-sized memory region mapped as PTEs into a THP. If metadata is enabled
for the VMA where the PTEs are mapped, make sure to allocate metadata
storage for the compound page that will be replacing them.

Signed-off-by: Alexandru Elisei <alexandru.elisei@xxxxxxx>
---
 arch/arm64/include/asm/memory_metadata.h | 7 +++++++
 include/asm-generic/memory_metadata.h    | 4 ++++
 mm/khugepaged.c                          | 7 +++++++
 3 files changed, 18 insertions(+)

diff --git a/arch/arm64/include/asm/memory_metadata.h b/arch/arm64/include/asm/memory_metadata.h
index 1b18e3217dd0..ade37331a5c8 100644
--- a/arch/arm64/include/asm/memory_metadata.h
+++ b/arch/arm64/include/asm/memory_metadata.h
@@ -5,6 +5,8 @@
 #ifndef __ASM_MEMORY_METADATA_H
 #define __ASM_MEMORY_METADATA_H
 
+#include <linux/mm.h>
+
 #include <asm-generic/memory_metadata.h>
 #include <asm/mte.h>
 
@@ -40,6 +42,11 @@ static inline int reserve_metadata_storage(struct page *page, int order, gfp_t g
 static inline void free_metadata_storage(struct page *page, int order)
 {
 }
+
+static inline bool vma_has_metadata(struct vm_area_struct *vma)
+{
+	return vma && (vma->vm_flags & VM_MTE);
+}
 #endif /* CONFIG_MEMORY_METADATA */
 
 #endif /* __ASM_MEMORY_METADATA_H */
diff --git a/include/asm-generic/memory_metadata.h b/include/asm-generic/memory_metadata.h
index 111d6edc0997..35a0d6a8b5fc 100644
--- a/include/asm-generic/memory_metadata.h
+++ b/include/asm-generic/memory_metadata.h
@@ -35,6 +35,10 @@ static inline bool folio_has_metadata(struct folio *folio)
 {
 	return false;
 }
+static inline bool vma_has_metadata(struct vm_area_struct *vma)
+{
+	return false;
+}
 #endif /* !CONFIG_MEMORY_METADATA */
 
 #endif /* __ASM_GENERIC_MEMORY_METADATA_H */
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 78c8d5d8b628..174710d941c2 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -20,6 +20,7 @@
 #include <linux/swapops.h>
 #include <linux/shmem_fs.h>
 
+#include <asm/memory_metadata.h>
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
 #include "internal.h"
@@ -96,6 +97,7 @@ static struct kmem_cache *mm_slot_cache __read_mostly;
 
 struct collapse_control {
 	bool is_khugepaged;
+	bool has_metadata;
 
 	/* Num pages scanned per node */
 	u32 node_load[MAX_NUMNODES];
@@ -1069,6 +1071,9 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
 	int node = hpage_collapse_find_target_node(cc);
 	struct folio *folio;
 
+	if (cc->has_metadata)
+		gfp |= __GFP_TAGGED;
+
 	if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
 		return SCAN_ALLOC_HUGE_PAGE_FAIL;
 
@@ -2497,6 +2502,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
 		if (khugepaged_scan.address < hstart)
 			khugepaged_scan.address = hstart;
 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+		cc->has_metadata = vma_has_metadata(vma);
 
 		while (khugepaged_scan.address < hend) {
 			bool mmap_locked = true;
@@ -2838,6 +2844,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	if (!cc)
 		return -ENOMEM;
 	cc->is_khugepaged = false;
+	cc->has_metadata = vma_has_metadata(vma);
 
 	mmgrab(mm);
 	lru_add_drain_all();
-- 
2.41.0