On Tue, Sep 15, 2020 at 6:57 AM Shakeel Butt <shakeelb@xxxxxxxxxx> wrote:
>
> On Mon, Sep 14, 2020 at 9:55 AM Muchun Song <songmuchun@xxxxxxxxxxxxx> wrote:
> >
> > On Tue, Sep 15, 2020 at 12:07 AM Shakeel Butt <shakeelb@xxxxxxxxxx> wrote:
> > >
> > > On Sun, Sep 13, 2020 at 12:01 AM Muchun Song <songmuchun@xxxxxxxxxxxxx> wrote:
> > > >
> > > > In cgroup v1, we have a numa_stat interface. This is useful for
> > > > providing visibility into the NUMA locality information within a
> > > > memcg, since the pages are allowed to be allocated from any physical
> > > > node. One of the use cases is evaluating application performance by
> > > > combining this information with the application's CPU allocation.
> > > > But cgroup v2 does not have it, so this patch adds the missing
> > > > information.
> > > >
> > > > Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
> > > > Suggested-by: Shakeel Butt <shakeelb@xxxxxxxxxx>
> > > > Reported-by: kernel test robot <lkp@xxxxxxxxx>
> > > > ---
> > > [snip]
> > > > +
> > > > +static struct numa_stat numa_stats[] = {
> > > > +	{ "anon", PAGE_SIZE, NR_ANON_MAPPED },
> > > > +	{ "file", PAGE_SIZE, NR_FILE_PAGES },
> > > > +	{ "kernel_stack", 1024, NR_KERNEL_STACK_KB },
> > > > +	{ "shmem", PAGE_SIZE, NR_SHMEM },
> > > > +	{ "file_mapped", PAGE_SIZE, NR_FILE_MAPPED },
> > > > +	{ "file_dirty", PAGE_SIZE, NR_FILE_DIRTY },
> > > > +	{ "file_writeback", PAGE_SIZE, NR_WRITEBACK },
> > > > +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > > > +	/*
> > > > +	 * The ratio will be initialized in numa_stats_init(), because
> > > > +	 * on some architectures (e.g. powerpc) HPAGE_PMD_SIZE is not a
> > > > +	 * compile-time constant.
> > > > +	 */
> > > > +	{ "anon_thp", 0, NR_ANON_THPS },
> > > > +#endif
> > > > +	{ "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
> > > > +	{ "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
> > > > +	{ "inactive_file", PAGE_SIZE, NR_INACTIVE_FILE },
> > > > +	{ "active_file", PAGE_SIZE, NR_ACTIVE_FILE },
> > > > +	{ "unevictable", PAGE_SIZE, NR_UNEVICTABLE },
> > > > +	{ "slab_reclaimable", 1, NR_SLAB_RECLAIMABLE_B },
> > > > +	{ "slab_unreclaimable", 1, NR_SLAB_UNRECLAIMABLE_B },
> > > > +};
> > > > +
> > > > +static int __init numa_stats_init(void)
> > > > +{
> > > > +	int i;
> > > > +
> > > > +	for (i = 0; i < ARRAY_SIZE(numa_stats); i++) {
> > > > +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > > > +		if (numa_stats[i].idx == NR_ANON_THPS)
> > > > +			numa_stats[i].ratio = HPAGE_PMD_SIZE;
> > > > +#endif
> > > > +	}
> > >
> > > The for loop seems excessive but I don't really have a good alternative.
> >
> > Yeah, I have no good alternative either. But numa_stats is only
> > initialized once, so it should not be a problem :).
> >
> > > > +
> > > > +	return 0;
> > > > +}
> > > > +pure_initcall(numa_stats_init);
> > > > +
> > > > +static unsigned long memcg_node_page_state(struct mem_cgroup *memcg,
> > > > +					   unsigned int nid,
> > > > +					   enum node_stat_item idx)
> > > > +{
> > > > +	VM_BUG_ON(nid >= nr_node_ids);
> > > > +	return lruvec_page_state(mem_cgroup_lruvec(memcg, NODE_DATA(nid)), idx);
> > > > +}
> > > > +
> > > > +static const char *memory_numa_stat_format(struct mem_cgroup *memcg)
> > > > +{
> > > > +	int i;
> > > > +	struct seq_buf s;
> > > > +
> > > > +	/* Reserve a byte for the trailing null */
> > > > +	seq_buf_init(&s, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE - 1);
> > > > +	if (!s.buffer)
> > > > +		return NULL;
> > > > +
> > > > +	for (i = 0; i < ARRAY_SIZE(numa_stats); i++) {
> > > > +		int nid;
> > > > +
> > > > +		seq_buf_printf(&s, "%s", numa_stats[i].name);
> > > > +		for_each_node_state(nid, N_MEMORY) {
> > > > +			u64 size;
> > > > +
> > > > +			size = memcg_node_page_state(memcg, nid,
> > > > +						     numa_stats[i].idx);
> > > > +			size *= numa_stats[i].ratio;
> > > > +			seq_buf_printf(&s, " N%d=%llu", nid, size);
> > > > +		}
> > > > +		seq_buf_putc(&s, '\n');
> > > > +	}
> > > > +
> > > > +	/* The above should easily fit into one page */
> > > > +	if (WARN_ON_ONCE(seq_buf_putc(&s, '\0')))
> > > > +		s.buffer[PAGE_SIZE - 1] = '\0';
> > >
> > > I think you should follow Michal's recommendation at
> > > http://lkml.kernel.org/r/20200914115724.GO16999@xxxxxxxxxxxxxx
> >
> > This case is different: seq_buf_putc(&s, '\n') will not add a '\0',
> > unless we use seq_buf_puts(&s, "\n").
>
> Why a separate memory_numa_stat_format()? memory_stat_format() is a
> separate function because it is called from two places; there is no
> need for a separate memory_numa_stat_format() here. Similarly, why not
> just call seq_printf() instead of formatting into a seq_buf?

I was indeed influenced by memory_stat_format(). Thank you for pointing
that out.

--
Yours,
Muchun
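
For reference, a minimal sketch of what Shakeel's suggestion could look
like: a cgroup v2 .seq_show handler that prints with seq_printf()
directly, reusing the numa_stats[] table and memcg_node_page_state()
helper from the patch above. The handler name and the use of
mem_cgroup_from_seq() are assumptions about the wiring, not taken from
the posted patch; the point is only that printing straight into the
seq_file removes the temporary kmalloc() buffer and the whole
'\0'-termination question.

/*
 * Hypothetical sketch, not the posted patch: emit memory.numa_stat
 * directly from the seq_show callback instead of formatting into a
 * seq_buf first. Assumes it lives in mm/memcontrol.c next to the
 * numa_stats[] table and memcg_node_page_state() from the patch.
 */
static int memory_numa_stat_show(struct seq_file *m, void *v)
{
	int i;
	struct mem_cgroup *memcg = mem_cgroup_from_seq(m);

	for (i = 0; i < ARRAY_SIZE(numa_stats); i++) {
		int nid;

		/* One line per stat: "<name> N0=<bytes> N1=<bytes> ..." */
		seq_printf(m, "%s", numa_stats[i].name);
		for_each_node_state(nid, N_MEMORY) {
			u64 size;

			size = memcg_node_page_state(memcg, nid,
						     numa_stats[i].idx);
			/* Convert the per-item unit to bytes. */
			size *= numa_stats[i].ratio;
			seq_printf(m, " N%d=%llu", nid, size);
		}
		seq_putc(m, '\n');
	}

	return 0;
}

Presumably such a handler would then be hooked up through a
"numa_stat" entry in the memory controller's cftype table, with
.seq_show pointing at it, mirroring how memory.stat is exposed.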