On Tue, Nov 9, 2010 at 6:24 PM, Greg Thelen <gthelen@xxxxxxxxxx> wrote: > If called with a mem_cgroup, then throttle_vm_writeout() should > query the given cgroup for its dirty memory usage limits. > > dirty_writeback_pages() is no longer used, so delete it. > > Signed-off-by: Greg Thelen <gthelen@xxxxxxxxxx> Reviewed-by: Minchan Kim <minchan.kim@xxxxxxxxx> > --- > include/linux/writeback.h | 2 +- > mm/page-writeback.c | 31 ++++++++++++++++--------------- > mm/vmscan.c | 2 +- > 3 files changed, 18 insertions(+), 17 deletions(-) > > diff --git a/include/linux/writeback.h b/include/linux/writeback.h > index 335dba1..1bacdda 100644 > --- a/include/linux/writeback.h > +++ b/include/linux/writeback.h > @@ -97,7 +97,7 @@ void laptop_mode_timer_fn(unsigned long data); > #else > static inline void laptop_sync_completion(void) { } > #endif > -void throttle_vm_writeout(gfp_t gfp_mask); > +void throttle_vm_writeout(gfp_t gfp_mask, struct mem_cgroup *mem_cgroup); > > /* These are exported to sysctl. */ > extern int dirty_background_ratio; > diff --git a/mm/page-writeback.c b/mm/page-writeback.c > index d717fa9..bf85062 100644 > --- a/mm/page-writeback.c > +++ b/mm/page-writeback.c > @@ -131,18 +131,6 @@ EXPORT_SYMBOL(laptop_mode); > static struct prop_descriptor vm_completions; > static struct prop_descriptor vm_dirties; > > -static unsigned long dirty_writeback_pages(void) > -{ > - unsigned long ret; > - > - ret = mem_cgroup_page_stat(NULL, MEMCG_NR_DIRTY_WRITEBACK_PAGES); > - if ((long)ret < 0) > - ret = global_page_state(NR_UNSTABLE_NFS) + > - global_page_state(NR_WRITEBACK); > - > - return ret; > -} > - Nice cleanup. > /* > * couple the period to the dirty_ratio: > * > @@ -703,12 +691,25 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, > } > EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); > > -void throttle_vm_writeout(gfp_t gfp_mask) > +/* > + * Throttle the current task if it is near dirty memory usage limits. 
> + * If @mem_cgroup is NULL or the root_cgroup, then use global dirty memory > + * information; otherwise use the per-memcg dirty limits. > + */ > +void throttle_vm_writeout(gfp_t gfp_mask, struct mem_cgroup *mem_cgroup) > { > struct dirty_info dirty_info; > + unsigned long nr_writeback; > > for ( ; ; ) { > - global_dirty_info(&dirty_info); > + if (!mem_cgroup || !memcg_dirty_info(mem_cgroup, &dirty_info)) { > + global_dirty_info(&dirty_info); > + nr_writeback = global_page_state(NR_UNSTABLE_NFS) + > + global_page_state(NR_WRITEBACK); > + } else { > + nr_writeback = mem_cgroup_page_stat( > + mem_cgroup, MEMCG_NR_DIRTY_WRITEBACK_PAGES); From the point of view of removing rcu_read_lock: the memcg can't be destroyed here because mem_cgroup_select_victim holds a reference via css_tryget, right? If so, we can remove the now-unnecessary rcu_read_lock in mem_cgroup_page_stat. -- Kind regards, Minchan Kim -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxxx For more info on Linux MM, see: http://www.linux-mm.org/ . Fight unfair telecom policy in Canada: sign http://dissolvethecrtc.ca/ Don't email: <a href