On Fri 09-08-24 10:26:18, Shakeel Butt wrote:
> At the moment memcg IDs are managed through IDR which requires external
> synchronization mechanisms and makes the allocation code a bit awkward.
> Let's switch to xarray and make the code simpler.
> 
> Suggested-by: Matthew Wilcox <willy@xxxxxxxxxxxxx>
> Signed-off-by: Shakeel Butt <shakeel.butt@xxxxxxxxx>

Neat! I was not aware of this feature of XArray. Is there any actual
reason for idr to keep its own implementation when XArray offers a
better interface?

Acked-by: Michal Hocko <mhocko@xxxxxxxx>

Thanks!

> ---
>  mm/memcontrol.c | 34 +++++++---------------------------
>  1 file changed, 7 insertions(+), 27 deletions(-)
> 
> diff --git a/mm/memcontrol.c b/mm/memcontrol.c
> index e1ffd2950393..b8e6b98485c6 100644
> --- a/mm/memcontrol.c
> +++ b/mm/memcontrol.c
> @@ -3363,29 +3363,12 @@ static void memcg_wb_domain_size_changed(struct mem_cgroup *memcg)
>   */
>  
>  #define MEM_CGROUP_ID_MAX	((1UL << MEM_CGROUP_ID_SHIFT) - 1)
> -static DEFINE_IDR(mem_cgroup_idr);
> -static DEFINE_SPINLOCK(memcg_idr_lock);
> -
> -static int mem_cgroup_alloc_id(void)
> -{
> -	int ret;
> -
> -	idr_preload(GFP_KERNEL);
> -	spin_lock(&memcg_idr_lock);
> -	ret = idr_alloc(&mem_cgroup_idr, NULL, 1, MEM_CGROUP_ID_MAX + 1,
> -			GFP_NOWAIT);
> -	spin_unlock(&memcg_idr_lock);
> -	idr_preload_end();
> -	return ret;
> -}
> +static DEFINE_XARRAY_ALLOC1(mem_cgroup_ids);
>  
>  static void mem_cgroup_id_remove(struct mem_cgroup *memcg)
>  {
>  	if (memcg->id.id > 0) {
> -		spin_lock(&memcg_idr_lock);
> -		idr_remove(&mem_cgroup_idr, memcg->id.id);
> -		spin_unlock(&memcg_idr_lock);
> -
> +		xa_erase(&mem_cgroup_ids, memcg->id.id);
>  		memcg->id.id = 0;
>  	}
>  }
> @@ -3420,7 +3403,7 @@ static inline void mem_cgroup_id_put(struct mem_cgroup *memcg)
>  struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
>  {
>  	WARN_ON_ONCE(!rcu_read_lock_held());
> -	return idr_find(&mem_cgroup_idr, id);
> +	return xa_load(&mem_cgroup_ids, id);
>  }
>  
>  #ifdef CONFIG_SHRINKER_DEBUG
> @@ -3519,11 +3502,10 @@ static struct mem_cgroup *mem_cgroup_alloc(struct mem_cgroup *parent)
>  	if (!memcg)
>  		return ERR_PTR(error);
>  
> -	memcg->id.id = mem_cgroup_alloc_id();
> -	if (memcg->id.id < 0) {
> -		error = memcg->id.id;
> +	error = xa_alloc(&mem_cgroup_ids, &memcg->id.id, NULL,
> +			 XA_LIMIT(1, MEM_CGROUP_ID_MAX), GFP_KERNEL);
> +	if (error)
>  		goto fail;
> -	}
>  
>  	memcg->vmstats = kzalloc(sizeof(struct memcg_vmstats),
> 				 GFP_KERNEL_ACCOUNT);
> @@ -3664,9 +3646,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css)
>  	 * publish it here at the end of onlining. This matches the
>  	 * regular ID destruction during offlining.
>  	 */
> -	spin_lock(&memcg_idr_lock);
> -	idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
> -	spin_unlock(&memcg_idr_lock);
> +	xa_store(&mem_cgroup_ids, memcg->id.id, memcg, GFP_KERNEL);
>  
>  	return 0;
>  offline_kmem:
> -- 
> 2.43.5
> 

-- 
Michal Hocko
SUSE Labs
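
For reference, the allocating-XArray pattern the patch switches to boils
down to the sketch below. This is illustrative only; "my_ids", "my_obj"
and the helper names are made up and not part of the patch:

/* Minimal sketch of the allocating-xarray ID pattern (hypothetical names). */
#include <linux/xarray.h>

#define MY_ID_MAX	0xffff	/* arbitrary limit for the sketch */

/* XA_FLAGS_ALLOC1: IDs are handed out starting from 1, never 0. */
static DEFINE_XARRAY_ALLOC1(my_ids);

struct my_obj {
	u32 id;
};

static int my_obj_alloc_id(struct my_obj *obj)
{
	/*
	 * xa_alloc() takes the xarray's internal lock itself and may
	 * sleep to allocate nodes with GFP_KERNEL, so no external
	 * spinlock or idr_preload() dance is needed.  Storing NULL
	 * reserves the index without making it visible to lookups.
	 */
	return xa_alloc(&my_ids, &obj->id, NULL,
			XA_LIMIT(1, MY_ID_MAX), GFP_KERNEL);
}

static void my_obj_publish(struct my_obj *obj)
{
	/* Replace the reserved (NULL) slot with the real pointer. */
	xa_store(&my_ids, obj->id, obj, GFP_KERNEL);
}

static struct my_obj *my_obj_from_id(u32 id)
{
	/* Lookup is safe under rcu_read_lock(), as with idr_find(). */
	return xa_load(&my_ids, id);
}

static void my_obj_free_id(struct my_obj *obj)
{
	xa_erase(&my_ids, obj->id);
	obj->id = 0;
}

The separate publish step mirrors the patch, which keeps the existing
two-phase scheme of reserving the ID at allocation time and only storing
the memcg pointer once the css is onlined.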