From: "Aneesh Kumar K.V" <aneesh.kumar@xxxxxxxxxxxxxxxxxx> This adds necessary charge/uncharge calls in the HugeTLB code. We do hugetlb cgroup charge in page alloc and uncharge in compound page destructor. Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@xxxxxxxxxxxxxxxxxx> --- mm/hugetlb.c | 16 +++++++++++++++- mm/hugetlb_cgroup.c | 7 +------ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bf79131..4ca92a9 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -628,6 +628,8 @@ static void free_huge_page(struct page *page) BUG_ON(page_mapcount(page)); spin_lock(&hugetlb_lock); + hugetlb_cgroup_uncharge_page(hstate_index(h), + pages_per_huge_page(h), page); if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) { /* remove the page from active list */ list_del(&page->lru); @@ -1116,7 +1118,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, struct hstate *h = hstate_vma(vma); struct page *page; long chg; + int ret, idx; + struct hugetlb_cgroup *h_cg; + idx = hstate_index(h); /* * Processes that did not create the mapping will have no * reserves and will not have accounted against subpool @@ -1132,6 +1137,11 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, if (hugepage_subpool_get_pages(spool, chg)) return ERR_PTR(-ENOSPC); + ret = hugetlb_cgroup_charge_page(idx, pages_per_huge_page(h), &h_cg); + if (ret) { + hugepage_subpool_put_pages(spool, chg); + return ERR_PTR(-ENOSPC); + } spin_lock(&hugetlb_lock); page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); spin_unlock(&hugetlb_lock); @@ -1139,6 +1149,9 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, if (!page) { page = alloc_buddy_huge_page(h, NUMA_NO_NODE); if (!page) { + hugetlb_cgroup_uncharge_cgroup(idx, + pages_per_huge_page(h), + h_cg); hugepage_subpool_put_pages(spool, chg); return ERR_PTR(-ENOSPC); } @@ -1147,7 +1160,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma, set_page_private(page, (unsigned long)spool); vma_commit_reservation(h, vma, addr); - + /* update page cgroup details */ + hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page); return page; } diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index 2a4881d..c2b7b8e 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -249,15 +249,10 @@ void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, if (hugetlb_cgroup_disabled()) return; - spin_lock(&hugetlb_lock); h_cg = hugetlb_cgroup_from_page(page); - if (unlikely(!h_cg)) { - spin_unlock(&hugetlb_lock); + if (unlikely(!h_cg)) return; - } set_hugetlb_cgroup(page, NULL); - spin_unlock(&hugetlb_lock); - res_counter_uncharge(&h_cg->hugepage[idx], csize); return; } -- 1.7.10 -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href=mailto:"dont@xxxxxxxxx"> email@xxxxxxxxx </a>