On Thu 28-06-12 18:58:31, Sha Zhengju wrote: > From: Sha Zhengju <handai.szj@xxxxxxxxxx> > > While accounting memcg page stat, it's not worth to use MEMCG_NR_FILE_MAPPED > as an extra layer of indirection because of the complexity and presumed > performance overhead. We can use MEM_CGROUP_STAT_FILE_MAPPED directly. > > Signed-off-by: Sha Zhengju <handai.szj@xxxxxxxxxx> Acked-by: Michal Hocko <mhocko@xxxxxxx> > --- > include/linux/memcontrol.h | 25 +++++++++++++++++-------- > mm/memcontrol.c | 24 +----------------------- > mm/rmap.c | 4 ++-- > 3 files changed, 20 insertions(+), 33 deletions(-) > > diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h > index 83e7ba9..20b0f2d 100644 > --- a/include/linux/memcontrol.h > +++ b/include/linux/memcontrol.h > @@ -27,9 +27,18 @@ struct page_cgroup; > struct page; > struct mm_struct; > > -/* Stats that can be updated by kernel. */ > -enum mem_cgroup_page_stat_item { > - MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */ > +/* > + * Statistics for memory cgroup. > + */ > +enum mem_cgroup_stat_index { > + /* > + * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. 
> + */ > + MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ > + MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ > + MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ > + MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ > + MEM_CGROUP_STAT_NSTATS, > }; > > struct mem_cgroup_reclaim_cookie { > @@ -164,17 +173,17 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page, > } > > void mem_cgroup_update_page_stat(struct page *page, > - enum mem_cgroup_page_stat_item idx, > + enum mem_cgroup_stat_index idx, > int val); > > static inline void mem_cgroup_inc_page_stat(struct page *page, > - enum mem_cgroup_page_stat_item idx) > + enum mem_cgroup_stat_index idx) > { > mem_cgroup_update_page_stat(page, idx, 1); > } > > static inline void mem_cgroup_dec_page_stat(struct page *page, > - enum mem_cgroup_page_stat_item idx) > + enum mem_cgroup_stat_index idx) > { > mem_cgroup_update_page_stat(page, idx, -1); > } > @@ -349,12 +358,12 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page, > } > > static inline void mem_cgroup_inc_page_stat(struct page *page, > - enum mem_cgroup_page_stat_item idx) > + enum mem_cgroup_stat_index idx) > { > } > > static inline void mem_cgroup_dec_page_stat(struct page *page, > - enum mem_cgroup_page_stat_item idx) > + enum mem_cgroup_stat_index idx) > { > } > > diff --git a/mm/memcontrol.c b/mm/memcontrol.c > index a2677e0..ebed1ca 100644 > --- a/mm/memcontrol.c > +++ b/mm/memcontrol.c > @@ -77,20 +77,6 @@ static int really_do_swap_account __initdata = 0; > #endif > > > -/* > - * Statistics for memory cgroup. > - */ > -enum mem_cgroup_stat_index { > - /* > - * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss. 
> - */ > - MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */ > - MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ > - MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ > - MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ > - MEM_CGROUP_STAT_NSTATS, > -}; > - > static const char * const mem_cgroup_stat_names[] = { > "cache", > "rss", > @@ -1926,7 +1912,7 @@ void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags) > } > > void mem_cgroup_update_page_stat(struct page *page, > - enum mem_cgroup_page_stat_item idx, int val) > + enum mem_cgroup_stat_index idx, int val) > { > struct mem_cgroup *memcg; > struct page_cgroup *pc = lookup_page_cgroup(page); > @@ -1939,14 +1925,6 @@ void mem_cgroup_update_page_stat(struct page *page, > if (unlikely(!memcg || !PageCgroupUsed(pc))) > return; > > - switch (idx) { > - case MEMCG_NR_FILE_MAPPED: > - idx = MEM_CGROUP_STAT_FILE_MAPPED; > - break; > - default: > - BUG(); > - } > - > this_cpu_add(memcg->stat->count[idx], val); > } > > diff --git a/mm/rmap.c b/mm/rmap.c > index 2144160..d6b93df 100644 > --- a/mm/rmap.c > +++ b/mm/rmap.c > @@ -1148,7 +1148,7 @@ void page_add_file_rmap(struct page *page) > mem_cgroup_begin_update_page_stat(page, &locked, &flags); > if (atomic_inc_and_test(&page->_mapcount)) { > __inc_zone_page_state(page, NR_FILE_MAPPED); > - mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED); > + mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); > } > mem_cgroup_end_update_page_stat(page, &locked, &flags); > } > @@ -1202,7 +1202,7 @@ void page_remove_rmap(struct page *page) > NR_ANON_TRANSPARENT_HUGEPAGES); > } else { > __dec_zone_page_state(page, NR_FILE_MAPPED); > - mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED); > + mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); > } > /* > * It would be tidy to reset the PageAnon mapping here, > -- > 1.7.1 > -- Michal Hocko SUSE Labs SUSE LINUX s.r.o. 
Lihovarska 1060/12 190 00 Praha 9 Czech Republic -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href="mailto:dont@xxxxxxxxx">email@xxxxxxxxx</a>