On Sun 05-05-19 14:40:57, Yafang Shao wrote: > If CONFIG_TRANSPARENT_HUGEPAGE is not set, hpage_nr_pages() is always 1; > if CONFIG_TRANSPARENT_HUGEPAGE is set, hpage_nr_pages() will > call PageTransHuge() to judge whether the page is a compound page or not. > So we can use the result of hpage_nr_pages() to avoid unnecessary > PageTransHuge(). The changelog doesn't describe motivation. Does this result in better code/performance? > Signed-off-by: Yafang Shao <laoar.shao@xxxxxxxxx> > --- > mm/memcontrol.c | 13 ++++++------- > 1 file changed, 6 insertions(+), 7 deletions(-) > > diff --git a/mm/memcontrol.c b/mm/memcontrol.c > index 2535e54..65c6f7c 100644 > --- a/mm/memcontrol.c > +++ b/mm/memcontrol.c > @@ -6306,7 +6306,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) > { > struct mem_cgroup *memcg; > unsigned int nr_pages; > - bool compound; > unsigned long flags; > > VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); > @@ -6328,8 +6327,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) > return; > > /* Force-charge the new page. The old one will be freed soon */ > - compound = PageTransHuge(newpage); > - nr_pages = compound ? 
hpage_nr_pages(newpage) : 1; > + nr_pages = hpage_nr_pages(newpage); > > page_counter_charge(&memcg->memory, nr_pages); > if (do_memsw_account()) > @@ -6339,7 +6337,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) > commit_charge(newpage, memcg, false); > > local_irq_save(flags); > - mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); > + mem_cgroup_charge_statistics(memcg, newpage, nr_pages > 1, nr_pages); > memcg_check_events(memcg, newpage); > local_irq_restore(flags); > } > @@ -6533,6 +6531,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) > struct mem_cgroup *memcg, *swap_memcg; > unsigned int nr_entries; > unsigned short oldid; > + bool compound; > > VM_BUG_ON_PAGE(PageLRU(page), page); > VM_BUG_ON_PAGE(page_count(page), page); > @@ -6553,8 +6552,9 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) > */ > swap_memcg = mem_cgroup_id_get_online(memcg); > nr_entries = hpage_nr_pages(page); > + compound = nr_entries > 1; > /* Get references for the tail pages, too */ > - if (nr_entries > 1) > + if (compound) > mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); > oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), > nr_entries); > @@ -6579,8 +6579,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) > * only synchronisation we have for updating the per-CPU variables. > */ > VM_BUG_ON(!irqs_disabled()); > - mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page), > - -nr_entries); > + mem_cgroup_charge_statistics(memcg, page, compound, -nr_entries); > memcg_check_events(memcg, page); > > if (!mem_cgroup_is_root(memcg)) > -- > 1.8.3.1 -- Michal Hocko SUSE Labs