On Mon, Jun 25, 2018 at 04:06:58PM -0700, Shakeel Butt wrote:
> @@ -140,8 +141,9 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
>                                                  struct inode *inode, u32 mask,
>                                                  const struct path *path)
>  {
> -        struct fanotify_event_info *event;
> +        struct fanotify_event_info *event = NULL;
>          gfp_t gfp = GFP_KERNEL;
> +        struct mem_cgroup *old_memcg = NULL;
>
>          /*
>           * For queues with unlimited length lost events are not expected and
> @@ -151,19 +153,25 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
>          if (group->max_events == UINT_MAX)
>                  gfp |= __GFP_NOFAIL;
>
> +        /* Whoever is interested in the event, pays for the allocation. */
> +        if (group->memcg) {
> +                gfp |= __GFP_ACCOUNT;
> +                old_memcg = memalloc_use_memcg(group->memcg);
> +        }

group->memcg is only NULL when memcg is disabled or there is some
offlining race. Can you make memalloc_use_memcg(NULL) mean that it
should charge root_mem_cgroup instead of current->mm->memcg? That way
we can make this site unconditional while retaining the behavior:

        gfp_t gfp = GFP_KERNEL | __GFP_ACCOUNT;

        memalloc_use_memcg(group->memcg);

        kmem_cache_alloc(..., gfp);
out:
        memalloc_unuse_memcg();

(dropping old_memcg and the unuse parameter as per the other mail)

>          if (fanotify_is_perm_event(mask)) {
>                  struct fanotify_perm_event_info *pevent;
>
>                  pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp);
>                  if (!pevent)
> -                        return NULL;
> +                        goto out;
>                  event = &pevent->fae;
>                  pevent->response = 0;
>                  goto init;
>          }
>          event = kmem_cache_alloc(fanotify_event_cachep, gfp);
>          if (!event)
> -                return NULL;
> +                goto out;
>  init: __maybe_unused
>          fsnotify_init_event(&event->fse, inode, mask);
>          event->tgid = get_pid(task_tgid(current));
> @@ -174,6 +182,9 @@ init: __maybe_unused
>                  event->path.mnt = NULL;
>                  event->path.dentry = NULL;
>          }
> +out:
> +        if (group->memcg)
> +                memalloc_unuse_memcg(old_memcg);
>          return event;
>  }

Thanks,
Johannes
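
A minimal sketch of the semantics proposed above, for illustration only:
memalloc_use_memcg(NULL) would override the charge target with
root_mem_cgroup rather than leaving the allocating task's own memcg in
effect. The per-task override field (current->active_memcg) and the
get_mem_cgroup_from_mm() fallback are assumptions borrowed from the rest
of this patch series, not something this mail spells out, and the code
below is not the in-tree implementation.

        /*
         * Sketch only.  Assumes a per-task override field
         * current->active_memcg that the kmem charging path consults
         * before falling back to current->mm, as elsewhere in this
         * series.  root_mem_cgroup and get_mem_cgroup_from_mm() are
         * memcg-internal symbols, so conceptually this lives in
         * mm/memcontrol.c / linux/sched/mm.h.
         */
        static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
        {
                /*
                 * Proposed semantics: NULL means "charge the root
                 * cgroup", so the fanotify callsite can call this
                 * unconditionally even when group->memcg was never set
                 * (memcg disabled, offlining race).
                 */
                current->active_memcg = memcg ? memcg : root_mem_cgroup;
        }

        static inline void memalloc_unuse_memcg(void)
        {
                /* Back to the default: charge the allocating task's memcg. */
                current->active_memcg = NULL;
        }

        /* How the charging side would pick its target under the same assumption. */
        static struct mem_cgroup *charge_target(void)
        {
                if (current->active_memcg)
                        return current->active_memcg;
                return get_mem_cgroup_from_mm(current->mm);
        }

With something along those lines, the fanotify hunk quoted above can drop
both the group->memcg check and the old_memcg save/restore, matching the
unconditional pattern shown in the mail.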