From: Abel Wu <wuyun.wu@xxxxxxxxxx>

The ALLOC_SLOWPATH statistic is currently missing for bulk allocations,
because the bulk path enters ___slab_alloc() directly and bypasses the
accounting done in slab_alloc_node(). Fix it by recording the statistic
in the allocation slow path itself, so that every slow-path entry is
counted regardless of the caller.

Signed-off-by: Abel Wu <wuyun.wu@xxxxxxxxxx>
---
 mm/slub.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/slub.c b/mm/slub.c
index df93a5a0e9a4..5d89e4064f83 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2600,6 +2600,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	void *freelist;
 	struct page *page;
 
+	stat(s, ALLOC_SLOWPATH);
+
 	page = c->page;
 	if (!page) {
 		/*
@@ -2788,7 +2790,6 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	page = c->page;
 	if (unlikely(!object || !node_match(page, node))) {
 		object = __slab_alloc(s, gfpflags, node, addr, c);
-		stat(s, ALLOC_SLOWPATH);
 	} else {
 		void *next_object = get_freepointer_safe(s, object);
 
--
2.28.0.windows.1
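
For context, below is a minimal user-space sketch of why the count goes
missing and why counting at the common slow-path entry fixes it. The
function bodies are simplified stand-ins that only mirror the call
structure of mm/slub.c; they are illustrative assumptions, not the real
kernel code.

	/*
	 * Illustrative sketch only: counting ALLOC_SLOWPATH in the
	 * per-object wrapper misses callers that enter the slow path
	 * directly (the bulk path); counting inside the slow path
	 * itself covers every caller.
	 */
	#include <stdio.h>

	static unsigned long alloc_slowpath_stat;

	/* Stand-in for ___slab_alloc(): the common slow-path entry. */
	static void *___slab_alloc(void)
	{
		alloc_slowpath_stat++;	/* the fix: count here */
		return "object";
	}

	/* Stand-in for slab_alloc_node(): per-object fast-path wrapper. */
	static void *slab_alloc_node(int fastpath_hit)
	{
		if (fastpath_hit)
			return "object";	/* fast path, no stat */
		return ___slab_alloc();		/* old code counted here */
	}

	/* Stand-in for the bulk path: calls the slow path directly. */
	static void kmem_cache_alloc_bulk(int nr)
	{
		for (int i = 0; i < nr; i++)
			___slab_alloc();
	}

	int main(void)
	{
		slab_alloc_node(0);		/* one slow-path allocation */
		kmem_cache_alloc_bulk(3);	/* three more via bulk */
		/* With the count at the common entry, all four show up. */
		printf("ALLOC_SLOWPATH = %lu\n", alloc_slowpath_stat);
		return 0;
	}

With the counting done in the wrapper, the three bulk allocations above
would not be reflected in the statistic; moving it into the slow path
makes the report 4, matching the actual number of slow-path entries.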