Rename memcg_check_events() into memcg1_check_events() for consistency
with other cgroup v1-specific functions.

Signed-off-by: Roman Gushchin <roman.gushchin@xxxxxxxxx>
---
 mm/memcontrol-v1.c | 6 +++---
 mm/memcontrol-v1.h | 2 +-
 mm/memcontrol.c    | 8 ++++----
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c
index c47ffb6105931..8b8c2c9516349 100644
--- a/mm/memcontrol-v1.c
+++ b/mm/memcontrol-v1.c
@@ -835,9 +835,9 @@ static int mem_cgroup_move_account(struct folio *folio,
 
 	local_irq_disable();
 	mem_cgroup_charge_statistics(to, nr_pages);
-	memcg_check_events(to, nid);
+	memcg1_check_events(to, nid);
 	mem_cgroup_charge_statistics(from, -nr_pages);
-	memcg_check_events(from, nid);
+	memcg1_check_events(from, nid);
 	local_irq_enable();
 out:
 	return ret;
@@ -1424,7 +1424,7 @@ static void mem_cgroup_threshold(struct mem_cgroup *memcg)
  * Check events in order.
  *
  */
-void memcg_check_events(struct mem_cgroup *memcg, int nid)
+void memcg1_check_events(struct mem_cgroup *memcg, int nid)
 {
 	if (IS_ENABLED(CONFIG_PREEMPT_RT))
 		return;
diff --git a/mm/memcontrol-v1.h b/mm/memcontrol-v1.h
index 524a2c76ffc97..ef1b7037cbdcc 100644
--- a/mm/memcontrol-v1.h
+++ b/mm/memcontrol-v1.h
@@ -12,7 +12,7 @@ static inline void memcg1_soft_limit_reset(struct mem_cgroup *memcg)
 }
 
 void mem_cgroup_charge_statistics(struct mem_cgroup *memcg, int nr_pages);
-void memcg_check_events(struct mem_cgroup *memcg, int nid);
+void memcg1_check_events(struct mem_cgroup *memcg, int nid);
 void memcg_oom_recover(struct mem_cgroup *memcg);
 int try_charge_memcg(struct mem_cgroup *memcg, gfp_t gfp_mask,
 		     unsigned int nr_pages);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6bc9009bee517..c14b1b01bcf53 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2632,7 +2632,7 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg)
 
 	local_irq_disable();
 	mem_cgroup_charge_statistics(memcg, folio_nr_pages(folio));
-	memcg_check_events(memcg, folio_nid(folio));
+	memcg1_check_events(memcg, folio_nid(folio));
 	local_irq_enable();
 }
 
@@ -5699,7 +5699,7 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	local_irq_save(flags);
 	__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_memory);
-	memcg_check_events(ug->memcg, ug->nid);
+	memcg1_check_events(ug->memcg, ug->nid);
 	local_irq_restore(flags);
 
 	/* drop reference from uncharge_folio */
@@ -5839,7 +5839,7 @@ void mem_cgroup_replace_folio(struct folio *old, struct folio *new)
 
 	local_irq_save(flags);
 	mem_cgroup_charge_statistics(memcg, nr_pages);
-	memcg_check_events(memcg, folio_nid(new));
+	memcg1_check_events(memcg, folio_nid(new));
 	local_irq_restore(flags);
 }
 
@@ -6106,7 +6106,7 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
 	memcg_stats_lock();
 	mem_cgroup_charge_statistics(memcg, -nr_entries);
 	memcg_stats_unlock();
-	memcg_check_events(memcg, folio_nid(folio));
+	memcg1_check_events(memcg, folio_nid(folio));
 
 	css_put(&memcg->css);
 }
-- 
2.45.1