From: Zi Yan <ziy@xxxxxxxxxx>

Set memcg information for the pages after the split. A new parameter,
new_order, tells the order of the resulting pages; it is always 0 for
now. This prepares for upcoming changes that will support splitting a
huge page to any lower order.

Signed-off-by: Zi Yan <ziy@xxxxxxxxxx>
Reviewed-by: Ralph Campbell <rcampbell@xxxxxxxxxx>
Acked-by: Roman Gushchin <guro@xxxxxx>
---
 include/linux/memcontrol.h | 5 +++--
 mm/huge_memory.c           | 2 +-
 mm/memcontrol.c            | 6 +++---
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index a8d5daf95988..39707feae505 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1062,7 +1062,7 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void mem_cgroup_split_huge_fixup(struct page *head);
+void mem_cgroup_split_huge_fixup(struct page *head, unsigned int new_order);
 #endif
 
 #else /* CONFIG_MEMCG */
@@ -1396,7 +1396,8 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
 	return 0;
 }
 
-static inline void mem_cgroup_split_huge_fixup(struct page *head)
+static inline void mem_cgroup_split_huge_fixup(struct page *head,
+					       unsigned int new_order)
 {
 }
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 88d8b7fce5d7..d7ab5cac5851 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2428,7 +2428,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 	lruvec = mem_cgroup_page_lruvec(head, pgdat);
 
 	/* complete memcg works before add pages to LRU */
-	mem_cgroup_split_huge_fixup(head);
+	mem_cgroup_split_huge_fixup(head, 0);
 
 	if (PageAnon(head) && PageSwapCache(head)) {
 		swp_entry_t entry = { .val = page_private(head) };
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index de5869dd354d..4521ed3a51b7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3223,15 +3223,15 @@ void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size)
  * Because tail pages are not marked as "used", set it. We're under
  * pgdat->lru_lock and migration entries setup in all page mappings.
  */
-void mem_cgroup_split_huge_fixup(struct page *head)
+void mem_cgroup_split_huge_fixup(struct page *head, unsigned int new_order)
 {
 	struct mem_cgroup *memcg = page_memcg(head);
-	int i;
+	int i, new_nr = 1 << new_order;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	for (i = 1; i < thp_nr_pages(head); i++) {
+	for (i = new_nr; i < thp_nr_pages(head); i += new_nr) {
 		css_get(&memcg->css);
 		head[i].memcg_data = (unsigned long)memcg;
 	}
-- 
2.28.0
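
As an aside for reviewers: below is a minimal userspace sketch (not
kernel code; the head_order and new_order values are examples chosen
purely for illustration) of which subpage indices the reworked loop
visits. It touches only the head subpage of each resulting lower-order
page, skipping index 0 because the original head already carries its
memcg_data.

/*
 * Userspace sketch of the new stride in mem_cgroup_split_huge_fixup().
 * Example: an order-9 THP (512 subpages, as with 4K base pages on
 * x86_64) split into order-2 pages.
 */
#include <stdio.h>

int main(void)
{
	unsigned int head_order = 9;           /* order of the THP being split */
	unsigned int new_order = 2;            /* order of the resulting pages */
	unsigned int nr = 1u << head_order;    /* total subpages, cf. thp_nr_pages() */
	unsigned int new_nr = 1u << new_order; /* loop stride, as in the patch */

	/*
	 * Starting at new_nr and stepping by new_nr visits only the head
	 * subpage of each resulting page; index 0 is skipped because the
	 * original head keeps its memcg_data.
	 */
	for (unsigned int i = new_nr; i < nr; i += new_nr)
		printf("would set memcg_data on subpage %u\n", i);

	return 0;
}

With new_order == 0 the stride is 1, so the loop visits every tail page
(1, 2, ..., nr - 1), matching the behavior of the existing call site in
__split_huge_page().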