On Mon, Jul 29, 2013 at 1:28 PM, Joonsoo Kim <iamjoonsoo.kim@xxxxxxx> wrote:
> This patch unifies successful allocation paths to make the code more
> readable. There are no functional changes.
>
> Acked-by: Michal Hocko <mhocko@xxxxxxx>
> Reviewed-by: Wanpeng Li <liwanp@xxxxxxxxxxxxxxxxxx>
> Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxxxxxxx>
> Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
> Acked-by: Hillf Danton <dhillf@xxxxxxxxx>
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 51564a8..31d78c5 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -1149,12 +1149,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
>  	}
>  	spin_lock(&hugetlb_lock);
>  	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
> -	if (page) {
> -		/* update page cgroup details */
> -		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
> -					     h_cg, page);
> -		spin_unlock(&hugetlb_lock);
> -	} else {
> +	if (!page) {
>  		spin_unlock(&hugetlb_lock);
>  		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
>  		if (!page) {
> @@ -1165,11 +1160,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
>  			return ERR_PTR(-ENOSPC);
>  		}
>  		spin_lock(&hugetlb_lock);
> -		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
> -					     h_cg, page);
>  		list_move(&page->lru, &h->hugepage_activelist);
> -		spin_unlock(&hugetlb_lock);
> +		/* Fall through */
>  	}
> +	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
> +	spin_unlock(&hugetlb_lock);
>
>  	set_page_private(page, (unsigned long)spool);
>
> --
> 1.7.9.5
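
For reference, the unified path in alloc_huge_page() after this change reads
roughly as below. This is a sketch condensed from the hunks above, not the
full function: the surrounding cgroup-uncharge and subpool cleanup on the
error path is elided, and context lines outside the hunks may differ.

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	if (!page) {
		/*
		 * No page in the pool: drop the lock, try the buddy
		 * allocator, then put the new page on the active list.
		 */
		spin_unlock(&hugetlb_lock);
		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
		if (!page)
			return ERR_PTR(-ENOSPC); /* uncharge/subpool cleanup elided */
		spin_lock(&hugetlb_lock);
		list_move(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	/* Both paths now hold the lock and commit the charge once here. */
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);

	set_page_private(page, (unsigned long)spool);

Reacquiring hugetlb_lock on the buddy path is what lets both branches share a
single commit-charge/unlock tail instead of duplicating it in each arm.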