On Sat 24-12-11 05:00:19, Kirill A. Shutemov wrote:
> From: "Kirill A. Shutemov" <kirill@xxxxxxxxxxxxx>
> 
> Signed-off-by: Kirill A. Shutemov <kirill@xxxxxxxxxxxxx>

I wasn't very convinced at first but it makes some sense as we should be
consistent. So
Acked-by: Michal Hocko <mhocko@xxxxxxx>

> ---
>  mm/memcontrol.c |   28 ++++++++++++++--------------
>  1 files changed, 14 insertions(+), 14 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index 3833a7b..48cba05 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -73,7 +73,7 @@ static int really_do_swap_account __initdata = 0;
>  #endif
> 
>  #else
> -#define do_swap_account	(0)
> +#define do_swap_account	0
>  #endif
> 
> 
> @@ -113,9 +113,9 @@ enum mem_cgroup_events_target {
>  	MEM_CGROUP_TARGET_NUMAINFO,
>  	MEM_CGROUP_NTARGETS,
>  };
> -#define THRESHOLDS_EVENTS_TARGET (128)
> -#define SOFTLIMIT_EVENTS_TARGET (1024)
> -#define NUMAINFO_EVENTS_TARGET (1024)
> +#define THRESHOLDS_EVENTS_TARGET 128
> +#define SOFTLIMIT_EVENTS_TARGET 1024
> +#define NUMAINFO_EVENTS_TARGET 1024
> 
>  struct mem_cgroup_stat_cpu {
>  	long count[MEM_CGROUP_STAT_NSTATS];
> @@ -148,7 +148,7 @@ struct mem_cgroup_per_zone {
>  	/* use container_of */
>  };
>  /* Macro for accessing counter */
> -#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
> +#define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[idx])
> 
>  struct mem_cgroup_per_node {
>  	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
> @@ -346,8 +346,8 @@ static bool move_file(void)
>   * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
>   * limit reclaim to prevent infinite loops, if they ever occur.
>   */
> -#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100)
> -#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2)
> +#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
> +#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
> 
>  enum charge_type {
>  	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
> @@ -368,11 +368,11 @@ enum mem_type {
>  	_KMEM,
>  };
> 
> -#define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
> -#define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
> +#define MEMFILE_PRIVATE(x, val)	((x) << 16 | (val))
> +#define MEMFILE_TYPE(val)	((val) >> 16 & 0xffff)
>  #define MEMFILE_ATTR(val)	((val) & 0xffff)
>  /* Used for OOM nofiier */
> -#define OOM_CONTROL	(0)
> +#define OOM_CONTROL	0
> 
>  /*
>   * Reclaim flags for mem_cgroup_hierarchical_reclaim
> @@ -1913,7 +1913,7 @@ struct memcg_stock_pcp {
>  	unsigned int nr_pages;
>  	struct work_struct work;
>  	unsigned long flags;
> -#define FLUSHING_CACHED_CHARGE	(0)
> +#define FLUSHING_CACHED_CHARGE	0
>  };
>  static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
>  static DEFINE_MUTEX(percpu_charge_mutex);
> @@ -2094,7 +2094,7 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
>  	struct memcg_stock_pcp *stock;
>  	struct mem_cgroup *iter;
> 
> -	if ((action == CPU_ONLINE)) {
> +	if (action == CPU_ONLINE) {
>  		for_each_mem_cgroup(iter)
>  			synchronize_mem_cgroup_on_move(iter, cpu);
>  		return NOTIFY_OK;
> @@ -2458,8 +2458,8 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
> 
>  #ifdef CONFIG_TRANSPARENT_HUGEPAGE
> 
> -#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
> -			(1 << PCG_MIGRATION))
> +#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MOVE_LOCK |\
> +			1 << PCG_MIGRATION)
>  /*
>   * Because tail pages are not marked as "used", set it. We're under
>   * zone->lru_lock, 'splitting on pmd' and compound_lock.
> -- 
> 1.7.7.3
> 
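For anyone wondering whether dropping the parentheses can change generated
code: every removed pair either wraps a single token or sits inside an
expression whose outer parentheses are kept, and '<<' binds tighter than '|',
so the result is identical. A minimal sketch of where parentheses do and do
not matter (hypothetical macros, not the ones from memcontrol.c):

	#include <stdio.h>

	#define SIMPLE		128	/* single token: parens add nothing */
	#define SUM_BAD		2 + 3	/* multi-token value: outer parens needed */
	#define SUM_GOOD	(2 + 3)

	/*
	 * Shaped like MEMFILE_PRIVATE after the patch: '<<' binds tighter
	 * than '|', so the parens around the shift can go, but the ones
	 * around the parameters stay so a caller may pass an expression
	 * such as 'flags & 3' and still get it evaluated as a unit.
	 */
	#define PACK(x, val)	((x) << 16 | (val))

	int main(void)
	{
		int flags = 7;

		printf("%d\n", SIMPLE * 2);		/* 256 */
		printf("%d\n", SUM_BAD * 4);		/* 14: expands to 2 + 3 * 4 */
		printf("%d\n", SUM_GOOD * 4);		/* 20: expands to (2 + 3) * 4 */
		printf("%#x\n", PACK(flags & 3, 5));	/* 0x30005 */
		return 0;
	}

The values touched by the patch are all of the SIMPLE kind, or keep the
outer parentheses around the whole expression, which is why the cleanup is
purely cosmetic.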
-- 
Michal Hocko
SUSE Labs
SUSE LINUX s.r.o.
Lihovarska 1060/12
190 00 Praha 9
Czech Republic