On Mon 12-02-24 13:39:17, Suren Baghdasaryan wrote: [...] > @@ -423,4 +424,18 @@ void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) > #ifdef CONFIG_MEMORY_FAILURE > printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); > #endif > +#ifdef CONFIG_MEM_ALLOC_PROFILING > + { > + struct seq_buf s; > + char *buf = kmalloc(4096, GFP_ATOMIC); > + > + if (buf) { > + printk("Memory allocations:\n"); > + seq_buf_init(&s, buf, 4096); > + alloc_tags_show_mem_report(&s); > + printk("%s", buf); > + kfree(buf); > + } > + } > +#endif I am pretty sure I have already objected to this. Memory allocations in the OOM path are simply a no-go unless there is absolutely no other way around it. In this case the buffer could be preallocated. -- Michal Hocko SUSE Labs