The patch titled Subject: mm/hugetlb.c: rename some allocation functions has been removed from the -mm tree. Its filename was mm-hugetlb-rename-some-allocation-functions.patch This patch was dropped because an updated version will be merged ------------------------------------------------------ From: Huang Shijie <shijie.huang@xxxxxxx> Subject: mm/hugetlb.c: rename some allocation functions After a future patch, __alloc_buddy_huge_page() will not necessarily use the buddy allocator. So this patch removes "buddy" from the names of these functions: __alloc_buddy_huge_page -> __alloc_huge_page __alloc_buddy_huge_page_no_mpol -> __alloc_huge_page_no_mpol __alloc_buddy_huge_page_with_mpol -> __alloc_huge_page_with_mpol This patch is a preparation for the later patch. Link: http://lkml.kernel.org/r/1478229075-20262-1-git-send-email-shijie.huang@xxxxxxx Signed-off-by: Huang Shijie <shijie.huang@xxxxxxx> Acked-by: Steve Capper <steve.capper@xxxxxxx> Cc: Naoya Horiguchi <n-horiguchi@xxxxxxxxxxxxx> Cc: Michal Hocko <mhocko@xxxxxxxx> Cc: Kirill A. 
Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx> Cc: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxxxxxxx> Cc: Gerald Schaefer <gerald.schaefer@xxxxxxxxxx> Cc: Mike Kravetz <mike.kravetz@xxxxxxxxxx> Cc: Will Deacon <will.deacon@xxxxxxx> Cc: Steve Capper <steve.capper@xxxxxxx> Cc: Kaly Xin <kaly.xin@xxxxxxx> Signed-off-by: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx> --- mm/hugetlb.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff -puN mm/hugetlb.c~mm-hugetlb-rename-some-allocation-functions mm/hugetlb.c --- a/mm/hugetlb.c~mm-hugetlb-rename-some-allocation-functions +++ a/mm/hugetlb.c @@ -1163,6 +1163,10 @@ static inline void destroy_compound_giga unsigned int order) { } static inline int alloc_fresh_gigantic_page(struct hstate *h, nodemask_t *nodes_allowed) { return 0; } +static struct page *alloc_gigantic_page(int nid, unsigned int order) +{ + return NULL; +} #endif static void update_and_free_page(struct hstate *h, struct page *page) @@ -1568,7 +1572,7 @@ static struct page *__hugetlb_alloc_budd * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This * implies that memory policies will not be taken in to account. */ -static struct page *__alloc_buddy_huge_page(struct hstate *h, +static struct page *__alloc_huge_page(struct hstate *h, struct vm_area_struct *vma, unsigned long addr, int nid) { struct page *page; @@ -1649,21 +1653,21 @@ static struct page *__alloc_buddy_huge_p * anywhere. */ static -struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid) +struct page *__alloc_huge_page_no_mpol(struct hstate *h, int nid) { unsigned long addr = -1; - return __alloc_buddy_huge_page(h, NULL, addr, nid); + return __alloc_huge_page(h, NULL, addr, nid); } /* * Use the VMA's mpolicy to allocate a huge page from the buddy. 
*/ static -struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h, +struct page *__alloc_huge_page_with_mpol(struct hstate *h, struct vm_area_struct *vma, unsigned long addr) { - return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE); + return __alloc_huge_page(h, vma, addr, NUMA_NO_NODE); } /* @@ -1681,7 +1685,7 @@ struct page *alloc_huge_page_node(struct spin_unlock(&hugetlb_lock); if (!page) - page = __alloc_buddy_huge_page_no_mpol(h, nid); + page = __alloc_huge_page_no_mpol(h, nid); return page; } @@ -1711,7 +1715,7 @@ static int gather_surplus_pages(struct h retry: spin_unlock(&hugetlb_lock); for (i = 0; i < needed; i++) { - page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE); + page = __alloc_huge_page_no_mpol(h, NUMA_NO_NODE); if (!page) { alloc_ok = false; break; @@ -2027,7 +2031,7 @@ struct page *alloc_huge_page(struct vm_a page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); if (!page) { spin_unlock(&hugetlb_lock); - page = __alloc_buddy_huge_page_with_mpol(h, vma, addr); + page = __alloc_huge_page_with_mpol(h, vma, addr); if (!page) goto out_uncharge_cgroup; if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { @@ -2285,7 +2289,7 @@ static unsigned long set_max_huge_pages( * First take pages out of surplus state. Then make up the * remaining difference by allocating fresh huge pages. * - * We might race with __alloc_buddy_huge_page() here and be unable + * We might race with __alloc_huge_page() here and be unable * to convert a surplus huge page to a normal huge page. That is * not critical, though, it just means the overall size of the * pool might be one hugepage larger than it needs to be, but @@ -2331,7 +2335,7 @@ static unsigned long set_max_huge_pages( * By placing pages into the surplus state independent of the * overcommit value, we are allowing the surplus pool size to * exceed overcommit. There are few sane options here. 
Since - * __alloc_buddy_huge_page() is checking the global counter, + * __alloc_huge_page() is checking the global counter, * though, we'll note that we're not allowed to exceed surplus * and won't grow the pool anywhere else. Not until one of the * sysctls are changed, or the surplus pages go out of use. _ Patches currently in -mm which might be from shijie.huang@xxxxxxx are -- To unsubscribe from this list: send the line "unsubscribe mm-commits" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html