The patch titled
     Subject: mm: use vma_init() to initialize VMAs on stack and data segments
has been added to the -mm tree.  Its filename is
     mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments.patch

This patch should soon appear at
    http://ozlabs.org/~akpm/mmots/broken-out/mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments.patch
and later at
    http://ozlabs.org/~akpm/mmotm/broken-out/mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments.patch

Before you just go and hit "reply", please:
   a) Consider who else should be cc'ed
   b) Prefer to cc a suitable mailing list as well
   c) Ideally: find the original patch on the mailing list and do a
      reply-to-all to that, adding suitable additional cc's

*** Remember to use Documentation/process/submit-checklist.rst when testing your code ***

The -mm tree is included into linux-next and is updated
there every 3-4 working days

------------------------------------------------------
From: "Kirill A. Shutemov" <kirill.shutemov@xxxxxxxxxxxxxxx>
Subject: mm: use vma_init() to initialize VMAs on stack and data segments

Make sure to initialize all VMAs properly, not only those which come
from vm_area_cachep.

Link: http://lkml.kernel.org/r/20180724121139.62570-3-kirill.shutemov@xxxxxxxxxxxxxxx
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
Acked-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Reviewed-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Dmitry Vyukov <dvyukov@xxxxxxxxxx>
Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: <stable@xxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
---

 arch/arm/kernel/process.c    |    1 +
 arch/arm/mach-rpc/ecard.c    |    2 +-
 arch/arm64/include/asm/tlb.h |    4 +++-
 arch/arm64/mm/hugetlbpage.c  |    7 +++++--
 arch/ia64/include/asm/tlb.h  |    2 +-
 arch/ia64/mm/init.c          |    2 +-
 arch/x86/um/mem_32.c         |    2 +-
 fs/hugetlbfs/inode.c         |    2 ++
 mm/mempolicy.c               |    1 +
 mm/shmem.c                   |    1 +
 10 files changed, 17 insertions(+), 7 deletions(-)
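
Note: the vma_init() helper used throughout this patch is introduced by the
preceding patch in this series, mm-introduce-vma_init.patch.  As a rough
sketch (an assumption about the helper's shape, not the exact body of that
patch), it does something like the following in include/linux/mm.h:

        static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
        {
                /* Attach the VMA to its mm (NULL for the static gate VMAs) and
                 * give the anon_vma chain a valid, empty list head so nothing
                 * later trips over uninitialized stack or .bss contents. */
                vma->vm_mm = mm;
                INIT_LIST_HEAD(&vma->anon_vma_chain);
        }

A later patch in the series, mm-fix-vma_is_anonymous-false-positives.patch,
presumably also points vm_ops at a dummy vm_operations_struct so that
vma_is_anonymous() keeps working for these on-stack and static VMAs.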
diff -puN arch/arm64/include/asm/tlb.h~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments arch/arm64/include/asm/tlb.h
--- a/arch/arm64/include/asm/tlb.h~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/arch/arm64/include/asm/tlb.h
@@ -37,7 +37,9 @@ static inline void __tlb_remove_table(vo
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+	struct vm_area_struct vma;
+
+	vma_init(&vma, tlb->mm);
 
 	/*
 	 * The ASID allocator will either invalidate the ASID or mark
diff -puN arch/arm64/mm/hugetlbpage.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments arch/arm64/mm/hugetlbpage.c
--- a/arch/arm64/mm/hugetlbpage.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/arch/arm64/mm/hugetlbpage.c
@@ -108,11 +108,13 @@ static pte_t get_clear_flush(struct mm_s
 			     unsigned long pgsize,
 			     unsigned long ncontig)
 {
-	struct vm_area_struct vma = { .vm_mm = mm };
+	struct vm_area_struct vma;
 	pte_t orig_pte = huge_ptep_get(ptep);
 	bool valid = pte_valid(orig_pte);
 	unsigned long i, saddr = addr;
 
+	vma_init(&vma, mm);
+
 	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
 		pte_t pte = ptep_get_and_clear(mm, addr, ptep);
 
@@ -145,9 +147,10 @@ static void clear_flush(struct mm_struct
 			     unsigned long pgsize,
 			     unsigned long ncontig)
 {
-	struct vm_area_struct vma = { .vm_mm = mm };
+	struct vm_area_struct vma;
 	unsigned long i, saddr = addr;
 
+	vma_init(&vma, mm);
 	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
 		pte_clear(mm, addr, ptep);
 
diff -puN arch/arm/kernel/process.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments arch/arm/kernel/process.c
--- a/arch/arm/kernel/process.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/arch/arm/kernel/process.c
@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma =
 
 static int __init gate_vma_init(void)
 {
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
 	return 0;
 }
diff -puN arch/arm/mach-rpc/ecard.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments arch/arm/mach-rpc/ecard.c
--- a/arch/arm/mach-rpc/ecard.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/arch/arm/mach-rpc/ecard.c
@@ -237,8 +237,8 @@ static void ecard_init_pgtables(struct m
 
 	memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
+	vma_init(&vma, mm);
 	vma.vm_flags = VM_EXEC;
-	vma.vm_mm = mm;
 
 	flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
 	flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
diff -puN arch/ia64/include/asm/tlb.h~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments arch/ia64/include/asm/tlb.h
--- a/arch/ia64/include/asm/tlb.h~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/arch/ia64/include/asm/tlb.h
@@ -120,7 +120,7 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_ga
 	 */
 	struct vm_area_struct vma;
 
-	vma.vm_mm = tlb->mm;
+	vma_init(&vma, tlb->mm);
 	/* flush the address range from the tlb: */
 	flush_tlb_range(&vma, start, end);
 	/* now flush the virt. page-table area mapping the address range: */
diff -puN arch/ia64/mm/init.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments arch/ia64/mm/init.c
--- a/arch/ia64/mm/init.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/arch/ia64/mm/init.c
@@ -273,7 +273,7 @@ static struct vm_area_struct gate_vma;
 
 static int __init gate_vma_init(void)
 {
-	gate_vma.vm_mm = NULL;
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff -puN arch/x86/um/mem_32.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments arch/x86/um/mem_32.c
--- a/arch/x86/um/mem_32.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/arch/x86/um/mem_32.c
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
 	if (!FIXADDR_USER_START)
 		return 0;
 
-	gate_vma.vm_mm = NULL;
+	vma_init(&gate_vma, NULL);
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff -puN fs/hugetlbfs/inode.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments fs/hugetlbfs/inode.c
--- a/fs/hugetlbfs/inode.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/fs/hugetlbfs/inode.c
@@ -411,6 +411,7 @@ static void remove_inode_hugepages(struc
 	bool truncate_op = (lend == LLONG_MAX);
 
 	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+	vma_init(&pseudo_vma, current->mm);
 	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
 	pagevec_init(&pvec);
 	next = start;
@@ -595,6 +596,7 @@ static long hugetlbfs_fallocate(struct f
 	 * as input to create an allocation policy.
 	 */
 	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+	vma_init(&pseudo_vma, mm);
 	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
 	pseudo_vma.vm_file = file;
 
diff -puN mm/mempolicy.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments mm/mempolicy.c
--- a/mm/mempolicy.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/mm/mempolicy.c
@@ -2505,6 +2505,7 @@ void mpol_shared_policy_init(struct shar
 
 		/* Create pseudo-vma that contains just the policy */
 		memset(&pvma, 0, sizeof(struct vm_area_struct));
+		vma_init(&pvma, NULL);
 		pvma.vm_end = TASK_SIZE;	/* policy covers entire file */
 		mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
 
diff -puN mm/shmem.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments mm/shmem.c
--- a/mm/shmem.c~mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments
+++ a/mm/shmem.c
@@ -1421,6 +1421,7 @@ static void shmem_pseudo_vma_init(struct
 {
 	/* Create a pseudo vma that just contains the policy */
 	memset(vma, 0, sizeof(*vma));
+	vma_init(vma, NULL);
 	/* Bias interleave by inode number to distribute better across nodes */
 	vma->vm_pgoff = index + info->vfs_inode.i_ino;
 	vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
_

Patches currently in -mm which might be from kirill.shutemov@xxxxxxxxxxxxxxx are

mm-introduce-vma_init.patch
mm-use-vma_init-to-initialize-vmas-on-stack-and-data-segments.patch
mm-fix-vma_is_anonymous-false-positives.patch
mm-page_ext-drop-definition-of-unused-page_ext_debug_poison.patch
mm-page_ext-constify-lookup_page_ext-argument.patch

--
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html