We don't have to call vma_has_reserves() each time we need this information; passing has_reserve down instead avoids the repeated calls. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx> diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ff46a2c..1426c03 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -572,7 +572,8 @@ static struct page *dequeue_huge_page_node(struct hstate *h, int nid) static struct page *dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, - unsigned long address, int avoid_reserve) + unsigned long address, + int has_reserve, int avoid_reserve) { struct page *page = NULL; struct mempolicy *mpol; @@ -587,8 +588,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h, * have no page reserves. This check ensures that reservations are * not "stolen". The child may still get SIGKILLed */ - if (!vma_has_reserves(h, vma, address) && - h->free_huge_pages - h->resv_huge_pages == 0) + if (!has_reserve && h->free_huge_pages - h->resv_huge_pages == 0) return NULL; /* If reserves cannot be used, ensure enough pages are in the pool */ @@ -607,7 +607,7 @@ retry_cpuset: if (page) { if (avoid_reserve) break; - if (!vma_has_reserves(h, vma, address)) + if (!has_reserve) break; h->resv_huge_pages--; @@ -1159,7 +1159,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, return ERR_PTR(-ENOSPC); } spin_lock(&hugetlb_lock); - page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); + page = dequeue_huge_page_vma(h, vma, addr, has_reserve, avoid_reserve); if (!page) { spin_unlock(&hugetlb_lock); page = alloc_buddy_huge_page(h, NUMA_NO_NODE); -- 1.7.9.5 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>