* Greg Thelen <gthelen@xxxxxxxxxx> [2010-10-03 23:57:58]:

> Replace usage of the mem_cgroup_update_file_mapped() memcg
> statistic update routine with two new routines:
> * mem_cgroup_inc_page_stat()
> * mem_cgroup_dec_page_stat()
>
> As before, only the file_mapped statistic is managed. However,
> these more general interfaces allow for new statistics to be
> more easily added. New statistics are added with memcg dirty
> page accounting.
>
> Signed-off-by: Greg Thelen <gthelen@xxxxxxxxxx>
> Signed-off-by: Andrea Righi <arighi@xxxxxxxxxxx>
> ---
>  include/linux/memcontrol.h |   31 ++++++++++++++++++++++++++++---
>  mm/memcontrol.c            |   17 ++++++++---------
>  mm/rmap.c                  |    4 ++--
>  3 files changed, 38 insertions(+), 14 deletions(-)
>
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index 159a076..7c7bec4 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -25,6 +25,11 @@ struct page_cgroup;
>  struct page;
>  struct mm_struct;
>
> +/* Stats that can be updated by kernel. */
> +enum mem_cgroup_write_page_stat_item {
> +	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
> +};
> +
>  extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
> 					struct list_head *dst,
> 					unsigned long *scanned, int order,
> @@ -121,7 +126,22 @@ static inline bool mem_cgroup_disabled(void)
>  	return false;
>  }
>
> -void mem_cgroup_update_file_mapped(struct page *page, int val);
> +void mem_cgroup_update_page_stat(struct page *page,
> +				 enum mem_cgroup_write_page_stat_item idx,
> +				 int val);
> +
> +static inline void mem_cgroup_inc_page_stat(struct page *page,
> +				enum mem_cgroup_write_page_stat_item idx)
> +{
> +	mem_cgroup_update_page_stat(page, idx, 1);
> +}
> +
> +static inline void mem_cgroup_dec_page_stat(struct page *page,
> +				enum mem_cgroup_write_page_stat_item idx)
> +{
> +	mem_cgroup_update_page_stat(page, idx, -1);
> +}
> +
>  unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
> 						gfp_t gfp_mask);
>  u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
> @@ -293,8 +313,13 @@
>  mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
>  {
>  }
>
> -static inline void mem_cgroup_update_file_mapped(struct page *page,
> -						 int val)
> +static inline void mem_cgroup_inc_page_stat(struct page *page,
> +				enum mem_cgroup_write_page_stat_item idx)
> +{
> +}
> +
> +static inline void mem_cgroup_dec_page_stat(struct page *page,
> +				enum mem_cgroup_write_page_stat_item idx)
>  {
>  }
>
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 512cb12..f4259f4 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -1592,7 +1592,9 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
>   * possibility of race condition. If there is, we take a lock.
>   */
>
> -static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
> +void mem_cgroup_update_page_stat(struct page *page,
> +				 enum mem_cgroup_write_page_stat_item idx,
> +				 int val)
>  {
>  	struct mem_cgroup *mem;
>  	struct page_cgroup *pc = lookup_page_cgroup(page);
> @@ -1615,30 +1617,27 @@ static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
>  		goto out;
>  	}
>
> -	this_cpu_add(mem->stat->count[idx], val);
> -
>  	switch (idx) {
> -	case MEM_CGROUP_STAT_FILE_MAPPED:
> +	case MEMCG_NR_FILE_MAPPED:
>  		if (val > 0)
>  			SetPageCgroupFileMapped(pc);
>  		else if (!page_mapped(page))
>  			ClearPageCgroupFileMapped(pc);
> +		idx = MEM_CGROUP_STAT_FILE_MAPPED;
>  		break;
>  	default:
>  		BUG();
>  	}
>
> +	this_cpu_add(mem->stat->count[idx], val);
> +
>  out:
>  	if (unlikely(need_unlock))
>  		unlock_page_cgroup(pc);
>  	rcu_read_unlock();
>  	return;
>  }
> -
> -void mem_cgroup_update_file_mapped(struct page *page, int val)
> -{
> -	mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
> -}
> +EXPORT_SYMBOL(mem_cgroup_update_page_stat);
>
>  /*
>   * size of first charge trial. "32" comes from vmscan.c's magic value.
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 8734312..779c0db 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -912,7 +912,7 @@ void page_add_file_rmap(struct page *page)
>  {
>  	if (atomic_inc_and_test(&page->_mapcount)) {
>  		__inc_zone_page_state(page, NR_FILE_MAPPED);
> -		mem_cgroup_update_file_mapped(page, 1);
> +		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
>  	}
>  }
>
> @@ -950,7 +950,7 @@ void page_remove_rmap(struct page *page)
>  		__dec_zone_page_state(page, NR_ANON_PAGES);
>  	} else {
>  		__dec_zone_page_state(page, NR_FILE_MAPPED);
> -		mem_cgroup_update_file_mapped(page, -1);
> +		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
>  	}
>  	/*
>  	 * It would be tidy to reset the PageAnon mapping here,

Acked-by: Balbir Singh <balbir@xxxxxxxxxxxxxxxxxx>

-- 
	Three Cheers,
	Balbir
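For anyone building on this interface: the changelog names memcg dirty page
accounting as the first new statistic, and a follow-on counter would plug in
along the lines of the sketch below. This is illustration only, not part of
the patch; MEMCG_NR_FILE_DIRTY, MEM_CGROUP_STAT_FILE_DIRTY, and
example_account_dirty() are hypothetical names standing in for whatever the
follow-on patches actually define.

	/*
	 * Sketch only -- not part of this patch. MEMCG_NR_FILE_DIRTY and
	 * MEM_CGROUP_STAT_FILE_DIRTY are hypothetical names used for
	 * illustration.
	 */

	/* 1) Extend the kernel-visible enum in include/linux/memcontrol.h: */
	enum mem_cgroup_write_page_stat_item {
		MEMCG_NR_FILE_MAPPED,	/* # of pages charged as file rss */
		MEMCG_NR_FILE_DIRTY,	/* hypothetical: # of dirty file pages */
	};

	/*
	 * 2) Translate it to an internal counter in the switch (idx) of
	 *    mem_cgroup_update_page_stat(), mirroring the FILE_MAPPED case:
	 *
	 *	case MEMCG_NR_FILE_DIRTY:
	 *		idx = MEM_CGROUP_STAT_FILE_DIRTY;
	 *		break;
	 */

	/* 3) Callers then account state transitions through the wrappers: */
	static void example_account_dirty(struct page *page, bool dirty)
	{
		if (dirty)
			mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_DIRTY);
		else
			mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_DIRTY);
	}

The reordering inside mem_cgroup_update_page_stat() exists to support exactly
this pattern: the switch now remaps the public enum value onto the internal
per-cpu counter index, which is why this_cpu_add() moved below the switch.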