To validate that this failure is reasonable, we need to know in the
caller whether the allocation request is for a reserved region or not.
So move vma_needs_reservation() up into the callers of
alloc_huge_page(). There is no functional change in this patch; the
following patch uses this information.

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9927407..d960f46 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1177,13 +1177,11 @@ static void vma_commit_reservation(struct hstate *h,
 }
 
 static struct page *alloc_huge_page(struct vm_area_struct *vma,
-				    unsigned long addr, int avoid_reserve)
+				    unsigned long addr, int use_reserve)
 {
 	struct hugepage_subpool *spool = subpool_vma(vma);
 	struct hstate *h = hstate_vma(vma);
 	struct page *page;
-	long chg;
-	bool use_reserve;
 	int ret, idx;
 	struct hugetlb_cgroup *h_cg;
 
@@ -1196,10 +1194,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	 * need pages and subpool limit allocated allocated if no reserve
 	 * mapping overlaps.
 	 */
-	chg = vma_needs_reservation(h, vma, addr);
-	if (chg < 0)
-		return ERR_PTR(-ENOMEM);
-	use_reserve = (!chg && !avoid_reserve);
 	if (!use_reserve)
 		if (hugepage_subpool_get_pages(spool, 1))
 			return ERR_PTR(-ENOSPC);
@@ -1244,7 +1238,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
 				unsigned long addr, int avoid_reserve)
 {
-	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
+	struct page *page = alloc_huge_page(vma, addr, !avoid_reserve);
 	if (IS_ERR(page))
 		page = NULL;
 	return page;
@@ -2581,6 +2575,8 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct hstate *h = hstate_vma(vma);
 	struct page *old_page, *new_page;
 	int outside_reserve = 0;
+	long chg;
+	bool use_reserve;
 	unsigned long mmun_start;	/* For mmu_notifiers */
 	unsigned long mmun_end;	/* For mmu_notifiers */
 
@@ -2612,7 -2608,17 @@ retry_avoidcopy:
 
 	/* Drop page table lock as buddy allocator may be called */
 	spin_unlock(ptl);
-	new_page = alloc_huge_page(vma, address, outside_reserve);
+	chg = vma_needs_reservation(h, vma, address);
+	if (chg < 0) {
+		page_cache_release(old_page);
+
+		/* Caller expects lock to be held */
+		spin_lock(ptl);
+		return VM_FAULT_OOM;
+	}
+	use_reserve = !chg && !outside_reserve;
+
+	new_page = alloc_huge_page(vma, address, use_reserve);
 	if (IS_ERR(new_page)) {
 		long err = PTR_ERR(new_page);
 
@@ -2742,6 +2748,8 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct address_space *mapping;
 	pte_t new_pte;
 	spinlock_t *ptl;
+	long chg;
+	bool use_reserve;
 
 	/*
 	 * Currently, we are forced to kill the process in the event the
@@ -2767,7 +2775,15 @@ retry:
 		size = i_size_read(mapping->host) >> huge_page_shift(h);
 		if (idx >= size)
 			goto out;
-		page = alloc_huge_page(vma, address, 0);
+
+		chg = vma_needs_reservation(h, vma, address);
+		if (chg == -ENOMEM) {
+			ret = VM_FAULT_OOM;
+			goto out;
+		}
+		use_reserve = !chg;
+
+		page = alloc_huge_page(vma, address, use_reserve);
 		if (IS_ERR(page)) {
 			ret = PTR_ERR(page);
 			if (ret == -ENOMEM)
-- 
1.7.9.5
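
For reference, below is a minimal userspace sketch of the caller-side pattern this
patch establishes. Everything here is a simplified stand-in, not the real kernel
code: vma_needs_reservation_stub(), alloc_huge_page_stub(), and caller() are
hypothetical toy functions that only model the control flow, i.e. each caller
performs the reservation check itself, maps a negative result onto its own error
path (as hugetlb_cow() and hugetlb_no_page() do with VM_FAULT_OOM above), and
passes the resulting use_reserve flag down to the allocator.

	#include <stdbool.h>
	#include <stdio.h>

	/*
	 * Hypothetical stand-in for vma_needs_reservation(): returns how many
	 * pages still need a reservation (0 if one already exists), or a
	 * negative value on error.
	 */
	static long vma_needs_reservation_stub(unsigned long addr)
	{
		return (addr & 1) ? 1 : 0;	/* arbitrary toy behavior */
	}

	/*
	 * Models alloc_huge_page() after this patch: the reservation decision
	 * has already been made by the caller and arrives as use_reserve.
	 */
	static int alloc_huge_page_stub(unsigned long addr, bool use_reserve)
	{
		printf("alloc at %#lx, use_reserve=%d\n", addr, (int)use_reserve);
		return 0;
	}

	/*
	 * Models a caller such as hugetlb_no_page(): check first, translate
	 * failure into the caller's own error code, then allocate.
	 */
	static int caller(unsigned long addr, bool avoid_reserve)
	{
		long chg = vma_needs_reservation_stub(addr);

		if (chg < 0)
			return -1;	/* caller-specific error path */

		/* Use the reserve only if one exists and we are allowed to. */
		return alloc_huge_page_stub(addr, !chg && !avoid_reserve);
	}

	int main(void)
	{
		caller(0x1000, false);	/* reserved: use_reserve=1 */
		caller(0x1001, false);	/* unreserved: use_reserve=0 */
		return 0;
	}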