On Sun 13-09-15 16:14:16, Tejun Heo wrote: > memcg_kmem_newpage_charge() and memcg_kmem_get_cache() are testing the > same series of conditions to decide whether to bypass kmem accounting. > Collect the tests into __memcg_kmem_bypass(). > > This is pure refactoring. > > Signed-off-by: Tejun Heo <tj@xxxxxxxxxx> Acked-by: Michal Hocko <mhocko@xxxxxxxx> > --- > Hello, > > These three patches are on top of mmotm as of Sep 13th and the two > patches from the following thread. > > http://lkml.kernel.org/g/20150913185940.GA25369@xxxxxxxxxxxxxxx > > Thanks. > > include/linux/memcontrol.h | 46 +++++++++++++++++++++------------------------ > 1 file changed, 22 insertions(+), 24 deletions(-) > > --- a/include/linux/memcontrol.h > +++ b/include/linux/memcontrol.h > @@ -776,20 +776,7 @@ int memcg_charge_kmem(struct mem_cgroup > unsigned long nr_pages); > void memcg_uncharge_kmem(struct mem_cgroup *memcg, unsigned long nr_pages); > > -/** > - * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. > - * @gfp: the gfp allocation flags. > - * @memcg: a pointer to the memcg this was charged against. > - * @order: allocation order. > - * > - * returns true if the memcg where the current task belongs can hold this > - * allocation. > - * > - * We return true automatically if this allocation is not to be accounted to > - * any memcg. > - */ > -static inline bool > -memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) > +static inline bool __memcg_kmem_bypass(gfp_t gfp) > { > if (!memcg_kmem_enabled()) > return true; > @@ -811,6 +798,26 @@ memcg_kmem_newpage_charge(gfp_t gfp, str > if (unlikely(fatal_signal_pending(current))) > return true; > > + return false; > +} > + > +/** > + * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. > + * @gfp: the gfp allocation flags. > + * @memcg: a pointer to the memcg this was charged against. > + * @order: allocation order. 
> + * > + * returns true if the memcg where the current task belongs can hold this > + * allocation. > + * > + * We return true automatically if this allocation is not to be accounted to > + * any memcg. > + */ > +static inline bool > +memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) > +{ > + if (__memcg_kmem_bypass(gfp)) > + return true; > + return __memcg_kmem_newpage_charge(gfp, memcg, order); > +} > > @@ -853,17 +860,8 @@ memcg_kmem_commit_charge(struct page *pa > static __always_inline struct kmem_cache * > memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) > { > - if (!memcg_kmem_enabled()) > - return cachep; > - if (gfp & __GFP_NOACCOUNT) > - return cachep; > - if (gfp & __GFP_NOFAIL) > + if (__memcg_kmem_bypass(gfp)) > return cachep; > - if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD)) > - return cachep; > - if (unlikely(fatal_signal_pending(current))) > - return cachep; > - > return __memcg_kmem_get_cache(cachep); > } > -- Michal Hocko SUSE Labs -- To unsubscribe, send a message with 'unsubscribe linux-mm' in the body to majordomo@xxxxxxxxx. For more info on Linux MM, see: http://www.linux-mm.org/ . Don't email: <a href="mailto:dont@xxxxxxxxx">email@xxxxxxxxx</a>