From: Nadav Amit <namit@xxxxxxxxxx>

commit a4a118f2eead1d6c49e00765de89878288d4b890 upstream.

When a call from __unmap_hugepage_range() to huge_pmd_unshare()
succeeds, a TLB flush is missing. This TLB flush must be performed
before releasing the i_mmap_rwsem, in order to prevent an unshared
PMDs page from being released and reused before the TLB flush takes
place.

Arguably, a comprehensive solution would use the mmu_gather interface
to batch the TLB flushes and the release of the PMDs page; however,
that is not an easy solution: (1) try_to_unmap_one() and
try_to_migrate_one() also call huge_pmd_unshare() and cannot use the
mmu_gather interface; and (2) deferring the release of the page
reference for the PMDs page until after i_mmap_rwsem is dropped can
confuse huge_pmd_unshare() into thinking PMDs are shared when they
are not.

Fix __unmap_hugepage_range() by adding the missing TLB flush, and by
forcing a flush when the unshare succeeds.

Fixes: 24669e58477e ("hugetlb: use mmu_gather instead of a temporary linked list for accumulating pages") # 3.6
Signed-off-by: Nadav Amit <namit@xxxxxxxxxx>
Reviewed-by: Mike Kravetz <mike.kravetz@xxxxxxxxxx>
Cc: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxxxxxxx>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@xxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
 arch/arm/include/asm/tlb.h  |  8 ++++++++
 arch/ia64/include/asm/tlb.h | 10 ++++++++++
 arch/s390/include/asm/tlb.h | 14 ++++++++++++++
 arch/sh/include/asm/tlb.h   | 10 ++++++++++
 arch/um/include/asm/tlb.h   | 12 ++++++++++++
 include/asm-generic/tlb.h   |  2 ++
 mm/hugetlb.c                | 19 +++++++++++++++++++
 mm/memory.c                 | 10 ++++++++++
 8 files changed, 85 insertions(+)

--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -280,6 +280,14 @@ tlb_remove_pmd_tlb_entry(struct mmu_gath
 	tlb_add_flush(tlb, addr);
 }
 
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+		    unsigned long size)
+{
+	tlb_add_flush(tlb, address);
+	tlb_add_flush(tlb, address + size - PMD_SIZE);
+}
+
 #define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)	__pmd_free_tlb(tlb, pmdp, addr)
 #define pud_free_tlb(tlb, pudp, addr)	pud_free((tlb)->mm, pudp)
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -269,6 +269,16 @@ __tlb_remove_tlb_entry (struct mmu_gathe
 	tlb->end_addr = address + PAGE_SIZE;
 }
 
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+		    unsigned long size)
+{
+	if (tlb->start_addr > address)
+		tlb->start_addr = address;
+	if (tlb->end_addr < address + size)
+		tlb->end_addr = address + size;
+}
+
 #define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)
 
 #define tlb_start_vma(tlb, vma)			do { } while (0)
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -116,6 +116,20 @@ static inline void tlb_remove_page_size(
 	return tlb_remove_page(tlb, page);
 }
 
+static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
+				unsigned long address, unsigned long size)
+{
+	/*
+	 * the range might exceed the original range that was provided to
+	 * tlb_gather_mmu(), so we need to update it despite the fact it is
+	 * usually not updated.
+	 */
+	if (tlb->start > address)
+		tlb->start = address;
+	if (tlb->end < address + size)
+		tlb->end = address + size;
+}
+
 /*
  * pte_free_tlb frees a pte table and clears the CRSTE for the
  * page table from the tlb.
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -127,6 +127,16 @@ static inline void tlb_remove_page_size(
 	return tlb_remove_page(tlb, page);
 }
 
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+		    unsigned long size)
+{
+	if (tlb->start > address)
+		tlb->start = address;
+	if (tlb->end < address + size)
+		tlb->end = address + size;
+}
+
 #define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
 static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 						     unsigned int page_size)
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -130,6 +130,18 @@ static inline void tlb_remove_page_size(
 	return tlb_remove_page(tlb, page);
 }
 
+static inline void
+tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+		    unsigned long size)
+{
+	tlb->need_flush = 1;
+
+	if (tlb->start > address)
+		tlb->start = address;
+	if (tlb->end < address + size)
+		tlb->end = address + size;
+}
+
 /**
  * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  *
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -117,6 +117,8 @@ void arch_tlb_gather_mmu(struct mmu_gath
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 			 unsigned long start, unsigned long end, bool force);
+void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+			 unsigned long size);
 extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
 				   int page_size);
 
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3386,6 +3386,7 @@ void __unmap_hugepage_range(struct mmu_g
 	unsigned long sz = huge_page_size(h);
 	const unsigned long mmun_start = start;	/* For mmu_notifiers */
 	const unsigned long mmun_end   = end;	/* For mmu_notifiers */
+	bool force_flush = false;
 
 	WARN_ON(!is_vm_hugetlb_page(vma));
 	BUG_ON(start & ~huge_page_mask(h));
@@ -3407,6 +3408,8 @@ void __unmap_hugepage_range(struct mmu_g
 		ptl = huge_pte_lock(h, mm, ptep);
 		if (huge_pmd_unshare(mm, &address, ptep)) {
 			spin_unlock(ptl);
+			tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
+			force_flush = true;
 			continue;
 		}
 
@@ -3463,6 +3466,22 @@ void __unmap_hugepage_range(struct mmu_g
 	}
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
 	tlb_end_vma(tlb, vma);
+
+	/*
+	 * If we unshared PMDs, the TLB flush was not recorded in mmu_gather. We
+	 * could defer the flush until now, since by holding i_mmap_rwsem we
+	 * guaranteed that the last reference would not be dropped. But we must
+	 * do the flushing before we return, as otherwise i_mmap_rwsem will be
+	 * dropped and the last reference to the shared PMDs page might be
+	 * dropped as well.
+	 *
+	 * In theory we could defer the freeing of the PMD pages as well, but
+	 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
+	 * detect sharing, so we cannot defer the release of the page either.
+	 * Instead, do flush now.
+	 */
+	if (force_flush)
+		tlb_flush_mmu(tlb);
 }
 
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -335,6 +335,16 @@ bool __tlb_remove_page_size(struct mmu_g
 	return false;
 }
 
+void tlb_flush_pmd_range(struct mmu_gather *tlb, unsigned long address,
+			 unsigned long size)
+{
+	if (tlb->page_size != 0 && tlb->page_size != PMD_SIZE)
+		tlb_flush_mmu(tlb);
+
+	tlb->page_size = PMD_SIZE;
+	tlb->start = min(tlb->start, address);
+	tlb->end = max(tlb->end, address + size);
+}
 #endif /* HAVE_GENERIC_MMU_GATHER */
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
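
To make the flush-range bookkeeping above easier to follow, here is a
minimal userspace model of the technique. This is a sketch only:
"struct gather", flush_pmd_range() and flush_mmu() are simplified
stand-ins invented for illustration, not the kernel's mmu_gather API.

/*
 * Minimal userspace model of the flush-range tracking added by this
 * patch.  Not kernel code: the struct, both helpers and the printed
 * "flush" are made-up stand-ins for the real mmu_gather machinery.
 */
#include <stdbool.h>
#include <stdio.h>

#define PUD_SIZE (1UL << 30)		/* range covered by one PMD page */
#define PUD_MASK (~(PUD_SIZE - 1))

struct gather {				/* models struct mmu_gather */
	unsigned long start;		/* lowest address needing a flush */
	unsigned long end;		/* one past the highest such address */
};

/* models tlb_flush_pmd_range(): only grows the pending flush range */
static void flush_pmd_range(struct gather *tlb, unsigned long address,
			    unsigned long size)
{
	if (tlb->start > address)
		tlb->start = address;
	if (tlb->end < address + size)
		tlb->end = address + size;
}

/* models tlb_flush_mmu(): just reports what would be flushed */
static void flush_mmu(struct gather *tlb)
{
	printf("flush [%#lx, %#lx)\n", tlb->start, tlb->end);
	tlb->start = -1UL;
	tlb->end = 0;
}

int main(void)
{
	struct gather tlb = { .start = -1UL, .end = 0 };
	unsigned long address = 0x40200000UL;	/* inside some shared PMD */
	bool force_flush = false;

	/*
	 * Unsharing drops a whole page of PMDs, so the entire PUD-sized
	 * region that page mapped must be queued for flushing...
	 */
	flush_pmd_range(&tlb, address & PUD_MASK, PUD_SIZE);
	force_flush = true;

	/*
	 * ...and the flush must be forced before returning, i.e. before
	 * the lock protecting the PMD page (i_mmap_rwsem in the kernel)
	 * is released and the page can be freed and reused.
	 */
	if (force_flush)
		flush_mmu(&tlb);

	return 0;
}

Run, this prints "flush [0x40000000, 0x80000000)": the pending range is
widened to the whole PUD region covered by the unshared PMD page, which
mirrors what the tlb_flush_pmd_range() calls in the patch record and
what the forced tlb_flush_mmu() at the end of
__unmap_hugepage_range() flushes before i_mmap_rwsem can be dropped.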